From 2e2b1d27991e15dbf4b0b1bbfe973647511d9e0b Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:34:19 +0200 Subject: [PATCH 01/13] Adds possibility to call BBS to retrieve stats for applications --- collectors/applications.go | 16 +- collectors/collectors.go | 12 +- fetcher/bbs_client.go | 62 + fetcher/fetcher.go | 23 +- fetcher/fetcher_handlers.go | 78 +- fetcher/fetcher_test.go | 2 +- fetcher/worker.go | 10 +- filters/filters.go | 5 +- go.mod | 14 + go.sum | 44 +- main.go | 42 +- models/model.go | 4 + vendor/code.cloudfoundry.org/bbs/.gitignore | 3 + vendor/code.cloudfoundry.org/bbs/CODEOWNERS | 1 + vendor/code.cloudfoundry.org/bbs/LICENSE | 201 + vendor/code.cloudfoundry.org/bbs/NOTICE | 18 + vendor/code.cloudfoundry.org/bbs/README.md | 60 + vendor/code.cloudfoundry.org/bbs/client.go | 1019 +++ .../bbs/encryption/crypt.go | 77 + .../bbs/encryption/encryption_config.go | 42 + .../bbs/encryption/key.go | 49 + .../bbs/encryption/key_manager.go | 39 + .../bbs/encryption/package.go | 1 + .../bbs/events/event_source.go | 269 + .../code.cloudfoundry.org/bbs/events/hub.go | 200 + .../bbs/events/package.go | 1 + .../bbs/format/encoding.go | 102 + .../bbs/format/envelope.go | 61 + .../bbs/format/format.go | 38 + .../bbs/format/package.go | 1 + .../bbs/format/versioner.go | 16 + .../bbs/models/actions.go | 592 ++ .../bbs/models/actions.pb.go | 5157 ++++++++++++ .../bbs/models/actions.proto | 104 + .../bbs/models/actual_lrp.go | 521 ++ .../bbs/models/actual_lrp.pb.go | 3220 +++++++ .../bbs/models/actual_lrp.proto | 75 + .../bbs/models/actual_lrp_requests.go | 346 + .../bbs/models/actual_lrp_requests.pb.go | 4872 +++++++++++ .../bbs/models/actual_lrp_requests.proto | 94 + .../bbs/models/bbs_presence.go | 38 + .../bbs/models/cached_dependency.go | 59 + .../bbs/models/cached_dependency.pb.go | 723 ++ .../bbs/models/cached_dependency.proto | 17 + .../bbs/models/cell_presence.go | 153 + .../bbs/models/cells.pb.go | 1703 ++++ .../bbs/models/cells.proto | 
33 + .../bbs/models/certificate_properties.pb.go | 385 + .../bbs/models/certificate_properties.proto | 8 + .../bbs/models/check_definition.go | 55 + .../bbs/models/check_definition.pb.go | 1453 ++++ .../bbs/models/check_definition.proto | 33 + .../bbs/models/desired_lrp.go | 824 ++ .../bbs/models/desired_lrp.pb.go | 7407 +++++++++++++++++ .../bbs/models/desired_lrp.proto | 153 + .../bbs/models/desired_lrp_requests.go | 69 + .../bbs/models/desired_lrp_requests.pb.go | 2806 +++++++ .../bbs/models/desired_lrp_requests.proto | 53 + .../bbs/models/domain.pb.go | 853 ++ .../bbs/models/domain.proto | 22 + .../bbs/models/domains.go | 36 + .../bbs/models/environment_variables.go | 10 + .../bbs/models/environment_variables.pb.go | 436 + .../bbs/models/environment_variables.proto | 10 + .../bbs/models/error.pb.go | 515 ++ .../bbs/models/error.proto | 52 + .../bbs/models/errors.go | 186 + .../bbs/models/evacuation.go | 12 + .../bbs/models/evacuation.pb.go | 2503 ++++++ .../bbs/models/evacuation.proto | 53 + .../bbs/models/events.go | 329 + .../bbs/models/events.pb.go | 4977 +++++++++++ .../bbs/models/events.proto | 99 + .../bbs/models/file.pb.go | 434 + .../bbs/models/file.proto | 10 + .../bbs/models/image_layer.go | 228 + .../bbs/models/image_layer.pb.go | 788 ++ .../bbs/models/image_layer.proto | 34 + .../code.cloudfoundry.org/bbs/models/json.go | 54 + .../bbs/models/log_rate_limit.pb.go | 360 + .../bbs/models/log_rate_limit.proto | 7 + .../bbs/models/lrp_convergence.go | 6 + .../bbs/models/metric_tags.go | 101 + .../bbs/models/metric_tags.pb.go | 460 + .../bbs/models/metric_tags.proto | 18 + .../bbs/models/models.go | 8 + .../bbs/models/modification_tag.go | 20 + .../bbs/models/modification_tag.pb.go | 419 + .../bbs/models/modification_tag.proto | 11 + .../bbs/models/network.pb.go | 522 ++ .../bbs/models/network.proto | 11 + .../bbs/models/package.go | 1 + .../bbs/models/ping.pb.go | 368 + .../bbs/models/ping.proto | 9 + .../bbs/models/restart_calculator.go | 85 + 
.../bbs/models/routes.go | 77 + .../bbs/models/security_group.pb.go | 1282 +++ .../bbs/models/security_group.proto | 25 + .../bbs/models/security_groups.go | 157 + .../bbs/models/sidecar.go | 32 + .../bbs/models/sidecar.pb.go | 472 ++ .../bbs/models/sidecar.proto | 13 + .../code.cloudfoundry.org/bbs/models/task.go | 206 + .../bbs/models/task.pb.go | 3101 +++++++ .../bbs/models/task.proto | 78 + .../bbs/models/task_requests.go | 125 + .../bbs/models/task_requests.pb.go | 4016 +++++++++ .../bbs/models/task_requests.proto | 78 + .../bbs/models/validator.go | 58 + .../bbs/models/version.go | 5 + .../bbs/models/volume_mount.go | 34 + .../bbs/models/volume_mount.pb.go | 1061 +++ .../bbs/models/volume_mount.proto | 28 + vendor/code.cloudfoundry.org/bbs/package.go | 1 + vendor/code.cloudfoundry.org/bbs/routes.go | 162 + .../bbs/trace/request_id.go | 53 + .../cfhttp/v2/.gitignore | 1 + .../cfhttp/v2/CODEOWNERS | 1 + .../code.cloudfoundry.org/cfhttp/v2/LICENSE | 201 + vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE | 20 + .../code.cloudfoundry.org/cfhttp/v2/README.md | 30 + .../code.cloudfoundry.org/cfhttp/v2/client.go | 129 + .../cfhttp/v2/package.go | 1 + .../cfhttp/v2/staticcheck.conf | 1 + .../code.cloudfoundry.org/lager/v3/.gitignore | 38 + .../code.cloudfoundry.org/lager/v3/CODEOWNERS | 1 + vendor/code.cloudfoundry.org/lager/v3/LICENSE | 201 + vendor/code.cloudfoundry.org/lager/v3/NOTICE | 20 + .../code.cloudfoundry.org/lager/v3/README.md | 102 + .../code.cloudfoundry.org/lager/v3/handler.go | 162 + .../lager/v3/internal/truncate/package.go | 1 + .../lager/v3/internal/truncate/truncate.go | 174 + .../lager/v3/json_redacter.go | 115 + .../code.cloudfoundry.org/lager/v3/logger.go | 217 + .../code.cloudfoundry.org/lager/v3/models.go | 151 + .../lager/v3/reconfigurable_sink.go | 37 + .../lager/v3/redacting_sink.go | 62 + .../lager/v3/slog_sink.go | 63 + .../code.cloudfoundry.org/lager/v3/tools.go | 8 + .../lager/v3/truncating_sink.go | 32 + .../lager/v3/writer_sink.go | 66 + 
.../go-task/slim-sprig/v3/.editorconfig | 14 + .../go-task/slim-sprig/v3/.gitattributes | 1 + .../go-task/slim-sprig/v3/.gitignore | 2 + .../go-task/slim-sprig/v3/CHANGELOG.md | 383 + .../go-task/slim-sprig/v3/LICENSE.txt | 19 + .../go-task/slim-sprig/v3/README.md | 73 + .../go-task/slim-sprig/v3/Taskfile.yml | 12 + .../go-task/slim-sprig/v3/crypto.go | 24 + .../github.com/go-task/slim-sprig/v3/date.go | 152 + .../go-task/slim-sprig/v3/defaults.go | 163 + .../github.com/go-task/slim-sprig/v3/dict.go | 118 + .../github.com/go-task/slim-sprig/v3/doc.go | 19 + .../go-task/slim-sprig/v3/functions.go | 317 + .../github.com/go-task/slim-sprig/v3/list.go | 464 ++ .../go-task/slim-sprig/v3/network.go | 12 + .../go-task/slim-sprig/v3/numeric.go | 228 + .../go-task/slim-sprig/v3/reflect.go | 28 + .../github.com/go-task/slim-sprig/v3/regex.go | 83 + .../go-task/slim-sprig/v3/strings.go | 189 + .../github.com/go-task/slim-sprig/v3/url.go | 66 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 ++ .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 +++++++ .../descriptor/descriptor_gostring.gen.go | 752 ++ .../protoc-gen-gogo/descriptor/helper.go | 390 + vendor/github.com/google/pprof/AUTHORS | 7 + vendor/github.com/google/pprof/CONTRIBUTORS | 16 + vendor/github.com/google/pprof/LICENSE | 202 + .../github.com/google/pprof/profile/encode.go | 596 ++ .../github.com/google/pprof/profile/filter.go | 274 + .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 + .../google/pprof/profile/legacy_profile.go | 1228 +++ .../github.com/google/pprof/profile/merge.go | 674 ++ .../google/pprof/profile/profile.go | 869 ++ 
.../github.com/google/pprof/profile/proto.go | 367 + .../github.com/google/pprof/profile/prune.go | 194 + vendor/github.com/onsi/ginkgo/v2/LICENSE | 20 + .../onsi/ginkgo/v2/config/deprecated.go | 69 + .../ginkgo/v2/formatter/colorable_others.go | 41 + .../ginkgo/v2/formatter/colorable_windows.go | 809 ++ .../onsi/ginkgo/v2/formatter/formatter.go | 234 + .../ginkgo/v2/ginkgo/build/build_command.go | 80 + .../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/command.go | 54 + .../onsi/ginkgo/v2/ginkgo/command/program.go | 180 + .../ginkgo/generators/boostrap_templates.go | 48 + .../v2/ginkgo/generators/bootstrap_command.go | 133 + .../v2/ginkgo/generators/generate_command.go | 265 + .../ginkgo/generators/generate_templates.go | 43 + .../v2/ginkgo/generators/generators_common.go | 76 + .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 173 + .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 129 + .../ginkgo/internal/profiles_and_reports.go | 227 + .../onsi/ginkgo/v2/ginkgo/internal/run.go | 355 + .../ginkgo/v2/ginkgo/internal/test_suite.go | 284 + .../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 + .../v2/ginkgo/internal/verify_version.go | 54 + .../ginkgo/v2/ginkgo/labels/labels_command.go | 123 + .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 301 + .../onsi/ginkgo/v2/ginkgo/outline/import.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 130 + .../v2/ginkgo/outline/outline_command.go | 98 + .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 232 + .../v2/ginkgo/unfocus/unfocus_command.go | 186 + .../onsi/ginkgo/v2/ginkgo/watch/delta.go | 22 + .../ginkgo/v2/ginkgo/watch/delta_tracker.go | 75 + .../ginkgo/v2/ginkgo/watch/dependencies.go | 92 + .../ginkgo/v2/ginkgo/watch/package_hash.go | 117 + .../ginkgo/v2/ginkgo/watch/package_hashes.go | 85 + .../onsi/ginkgo/v2/ginkgo/watch/suite.go | 87 + .../ginkgo/v2/ginkgo/watch/watch_command.go | 192 + .../interrupt_handler/interrupt_handler.go | 177 + 
.../sigquit_swallower_unix.go | 15 + .../sigquit_swallower_windows.go | 8 + .../parallel_support/client_server.go | 72 + .../internal/parallel_support/http_client.go | 166 + .../internal/parallel_support/http_server.go | 242 + .../internal/parallel_support/rpc_client.go | 136 + .../internal/parallel_support/rpc_server.go | 75 + .../parallel_support/server_handler.go | 234 + .../ginkgo/v2/reporters/default_reporter.go | 788 ++ .../v2/reporters/deprecated_reporter.go | 149 + .../onsi/ginkgo/v2/reporters/json_report.go | 69 + .../onsi/ginkgo/v2/reporters/junit_report.go | 390 + .../onsi/ginkgo/v2/reporters/reporter.go | 29 + .../ginkgo/v2/reporters/teamcity_report.go | 105 + .../onsi/ginkgo/v2/types/code_location.go | 159 + .../github.com/onsi/ginkgo/v2/types/config.go | 804 ++ .../onsi/ginkgo/v2/types/deprecated_types.go | 141 + .../ginkgo/v2/types/deprecation_support.go | 177 + .../onsi/ginkgo/v2/types/enum_support.go | 43 + .../github.com/onsi/ginkgo/v2/types/errors.go | 653 ++ .../onsi/ginkgo/v2/types/file_filter.go | 106 + .../github.com/onsi/ginkgo/v2/types/flags.go | 490 ++ .../onsi/ginkgo/v2/types/label_filter.go | 583 ++ .../onsi/ginkgo/v2/types/report_entry.go | 190 + .../github.com/onsi/ginkgo/v2/types/types.go | 922 ++ .../onsi/ginkgo/v2/types/version.go | 3 + .../github.com/openzipkin/zipkin-go/LICENSE | 201 + .../zipkin-go/idgenerator/idgenerator.go | 130 + .../openzipkin/zipkin-go/model/annotation.go | 60 + .../openzipkin/zipkin-go/model/doc.go | 23 + .../openzipkin/zipkin-go/model/endpoint.go | 50 + .../openzipkin/zipkin-go/model/kind.go | 27 + .../openzipkin/zipkin-go/model/span.go | 161 + .../openzipkin/zipkin-go/model/span_id.go | 44 + .../openzipkin/zipkin-go/model/traceid.go | 75 + vendor/github.com/vito/go-sse/LICENSE.md | 201 + vendor/github.com/vito/go-sse/sse/errors.go | 5 + vendor/github.com/vito/go-sse/sse/event.go | 129 + .../vito/go-sse/sse/event_source.go | 278 + .../github.com/vito/go-sse/sse/read_closer.go | 122 + 
vendor/go.uber.org/automaxprocs/.codecov.yml | 14 + vendor/go.uber.org/automaxprocs/.gitignore | 33 + vendor/go.uber.org/automaxprocs/CHANGELOG.md | 52 + .../automaxprocs/CODE_OF_CONDUCT.md | 75 + .../go.uber.org/automaxprocs/CONTRIBUTING.md | 81 + vendor/go.uber.org/automaxprocs/LICENSE | 19 + vendor/go.uber.org/automaxprocs/Makefile | 46 + vendor/go.uber.org/automaxprocs/README.md | 71 + .../go.uber.org/automaxprocs/automaxprocs.go | 33 + .../automaxprocs/internal/cgroups/cgroup.go | 79 + .../automaxprocs/internal/cgroups/cgroups.go | 118 + .../automaxprocs/internal/cgroups/cgroups2.go | 176 + .../automaxprocs/internal/cgroups/doc.go | 23 + .../automaxprocs/internal/cgroups/errors.go | 52 + .../internal/cgroups/mountpoint.go | 171 + .../automaxprocs/internal/cgroups/subsys.go | 103 + .../internal/runtime/cpu_quota_linux.go | 75 + .../internal/runtime/cpu_quota_unsupported.go | 31 + .../automaxprocs/internal/runtime/runtime.go | 40 + .../automaxprocs/maxprocs/maxprocs.go | 139 + .../automaxprocs/maxprocs/version.go | 24 + vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + vendor/golang.org/x/tools/cover/profile.go | 266 + .../x/tools/go/ast/inspector/inspector.go | 286 + .../x/tools/go/ast/inspector/iter.go | 85 + .../x/tools/go/ast/inspector/typeof.go | 230 + .../x/tools/go/ast/inspector/walk.go | 341 + .../x/tools/internal/astutil/edge/edge.go | 295 + vendor/modules.txt | 67 + 291 files changed, 90155 insertions(+), 54 deletions(-) create mode 100644 fetcher/bbs_client.go create mode 100644 vendor/code.cloudfoundry.org/bbs/.gitignore create mode 100644 vendor/code.cloudfoundry.org/bbs/CODEOWNERS create mode 100644 vendor/code.cloudfoundry.org/bbs/LICENSE create mode 100644 vendor/code.cloudfoundry.org/bbs/NOTICE create mode 100644 vendor/code.cloudfoundry.org/bbs/README.md create mode 100644 vendor/code.cloudfoundry.org/bbs/client.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/crypt.go create mode 100644 
vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/key.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/events/event_source.go create mode 100644 vendor/code.cloudfoundry.org/bbs/events/hub.go create mode 100644 vendor/code.cloudfoundry.org/bbs/events/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/encoding.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/envelope.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/format.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/versioner.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actions.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actions.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actions.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cell_presence.go create mode 100644 
vendor/code.cloudfoundry.org/bbs/models/cells.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cells.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/check_definition.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/check_definition.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/domain.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/domain.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/domains.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/environment_variables.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/error.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/error.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/errors.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/evacuation.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/evacuation.proto create mode 100644 
vendor/code.cloudfoundry.org/bbs/models/events.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/events.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/events.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/file.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/file.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/image_layer.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/image_layer.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/json.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/metric_tags.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/models.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/modification_tag.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/network.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/network.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/ping.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/ping.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/routes.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go create 
mode 100644 vendor/code.cloudfoundry.org/bbs/models/security_group.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/security_groups.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/sidecar.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/sidecar.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task_requests.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task_requests.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/validator.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/version.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/volume_mount.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/routes.go create mode 100644 vendor/code.cloudfoundry.org/bbs/trace/request_id.go create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/README.md create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/client.go create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/package.go create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf create mode 100644 vendor/code.cloudfoundry.org/lager/v3/.gitignore 
create mode 100644 vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS create mode 100644 vendor/code.cloudfoundry.org/lager/v3/LICENSE create mode 100644 vendor/code.cloudfoundry.org/lager/v3/NOTICE create mode 100644 vendor/code.cloudfoundry.org/lager/v3/README.md create mode 100644 vendor/code.cloudfoundry.org/lager/v3/handler.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/json_redacter.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/logger.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/models.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/slog_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/tools.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/writer_sink.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/.editorconfig create mode 100644 vendor/github.com/go-task/slim-sprig/v3/.gitattributes create mode 100644 vendor/github.com/go-task/slim-sprig/v3/.gitignore create mode 100644 vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md create mode 100644 vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt create mode 100644 vendor/github.com/go-task/slim-sprig/v3/README.md create mode 100644 vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml create mode 100644 vendor/github.com/go-task/slim-sprig/v3/crypto.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/date.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/defaults.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/dict.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/doc.go create 
mode 100644 vendor/github.com/go-task/slim-sprig/v3/functions.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/list.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/network.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/numeric.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/reflect.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/regex.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/strings.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/url.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/google/pprof/AUTHORS create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 vendor/github.com/google/pprof/LICENSE create mode 100644 vendor/github.com/google/pprof/profile/encode.go create mode 100644 vendor/github.com/google/pprof/profile/filter.go create mode 100644 vendor/github.com/google/pprof/profile/index.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 
vendor/github.com/google/pprof/profile/merge.go create mode 100644 vendor/github.com/google/pprof/profile/profile.go create mode 100644 vendor/github.com/google/pprof/profile/proto.go create mode 100644 vendor/github.com/google/pprof/profile/prune.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/v2/config/deprecated.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go create mode 100644 
vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go create mode 100644 
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/config.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/enum_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/errors.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/file_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/flags.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/label_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/report_entry.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/version.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/LICENSE create mode 100644 vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/annotation.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/doc.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/endpoint.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/kind.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/span.go create mode 100644 
vendor/github.com/openzipkin/zipkin-go/model/span_id.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/traceid.go create mode 100644 vendor/github.com/vito/go-sse/LICENSE.md create mode 100644 vendor/github.com/vito/go-sse/sse/errors.go create mode 100644 vendor/github.com/vito/go-sse/sse/event.go create mode 100644 vendor/github.com/vito/go-sse/sse/event_source.go create mode 100644 vendor/github.com/vito/go-sse/sse/read_closer.go create mode 100644 vendor/go.uber.org/automaxprocs/.codecov.yml create mode 100644 vendor/go.uber.org/automaxprocs/.gitignore create mode 100644 vendor/go.uber.org/automaxprocs/CHANGELOG.md create mode 100644 vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/automaxprocs/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/automaxprocs/LICENSE create mode 100644 vendor/go.uber.org/automaxprocs/Makefile create mode 100644 vendor/go.uber.org/automaxprocs/README.md create mode 100644 vendor/go.uber.org/automaxprocs/automaxprocs.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go create mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go create mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/version.go create mode 100644 
vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 vendor/golang.org/x/tools/cover/profile.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/inspector.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/iter.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/walk.go create mode 100644 vendor/golang.org/x/tools/internal/astutil/edge/edge.go diff --git a/collectors/applications.go b/collectors/applications.go index 887d8025..b195c68e 100644 --- a/collectors/applications.go +++ b/collectors/applications.go @@ -224,6 +224,7 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m process = cProc } } + spaceRel, ok := application.Relationships[constant.RelationshipTypeSpace] if !ok { return fmt.Errorf("could not find space relation in application '%s'", application.GUID) @@ -290,6 +291,19 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m string(application.State), ).Set(float64(process.Instances.Value)) + runningInstances := appSum.RunningInstances + // Use bbs data if available + if len(objs.ProcessActualLRPs) > 0 { + runningInstances = 0 + lrps, ok := objs.ProcessActualLRPs[process.GUID] + if ok { + for _, lrp := range lrps { + if lrp.State == "RUNNING" { + runningInstances++ + } + } + } + } c.applicationInstancesRunningMetric.WithLabelValues( application.GUID, application.Name, @@ -298,7 +312,7 @@ space.GUID, space.Name, string(application.State), - ).Set(float64(appSum.RunningInstances)) + ).Set(float64(runningInstances)) c.applicationMemoryMbMetric.WithLabelValues( application.GUID, diff --git a/collectors/collectors.go b/collectors/collectors.go index 1c38ee56..6955ecf0 100644 --- a/collectors/collectors.go +++ b/collectors/collectors.go @@ -14,7 +14,8 @@ 
type ObjectCollector interface { type Collector struct { workers int - config *fetcher.CFConfig + cfConfig *fetcher.CFConfig + bbsConfig *fetcher.BBSConfig filter *filters.Filter collectors []ObjectCollector } @@ -24,12 +25,14 @@ func NewCollector( environment string, deployment string, workers int, - config *fetcher.CFConfig, + cfConfig *fetcher.CFConfig, + bbsConfig *fetcher.BBSConfig, filter *filters.Filter, ) (*Collector, error) { res := &Collector{ workers: workers, - config: config, + cfConfig: cfConfig, + bbsConfig: bbsConfig, filter: filter, collectors: []ObjectCollector{}, } @@ -118,8 +121,9 @@ func NewCollector( } func (c *Collector) Collect(ch chan<- prometheus.Metric) { - fetcher := fetcher.NewFetcher(c.workers, c.config, c.filter) + fetcher := fetcher.NewFetcher(c.workers, c.cfConfig, c.bbsConfig, c.filter) objs := fetcher.GetObjects() + for _, collector := range c.collectors { collector.Collect(objs, ch) } diff --git a/fetcher/bbs_client.go b/fetcher/bbs_client.go new file mode 100644 index 00000000..bf96b2da --- /dev/null +++ b/fetcher/bbs_client.go @@ -0,0 +1,62 @@ +package fetcher + +import ( + "strings" + "time" + + "code.cloudfoundry.org/bbs" + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/bbs/trace" + "code.cloudfoundry.org/lager/v3" +) + +const ( + clientSessionCacheSize int = -1 + maxIdleConnsPerHost int = -1 +) + +type BBSClient struct { + client bbs.Client + config *BBSConfig + logger lager.Logger +} + +type BBSConfig struct { + URL string `yaml:"url"` + Timeout int `yaml:"timeout"` + CAFile string `yaml:"ca_file"` + CertFile string `yaml:"cert_file"` + KeyFile string `yaml:"key_file"` + SkipCertVerify bool `yaml:"skip_cert_verify"` +} + +func NewBBSClient(config *BBSConfig) (*BBSClient, error) { + var err error + bbsClient := BBSClient{ + config: config, + logger: lager.NewLogger("bbs-client"), + } + bbsClientConfig := bbs.ClientConfig{ + URL: config.URL, + Retries: 1, + RequestTimeout: time.Duration(config.Timeout) * 
time.Second, + } + if strings.HasPrefix(config.URL, "https://") { + bbsClientConfig.IsTLS = true + bbsClientConfig.InsecureSkipVerify = config.SkipCertVerify + bbsClientConfig.CAFile = config.CAFile + bbsClientConfig.CertFile = config.CertFile + bbsClientConfig.KeyFile = config.KeyFile + bbsClientConfig.ClientSessionCacheSize = clientSessionCacheSize + bbsClientConfig.MaxIdleConnsPerHost = maxIdleConnsPerHost + } + bbsClient.client, err = bbs.NewClientWithConfig(bbsClientConfig) + return &bbsClient, err +} + +func (b *BBSClient) GetActualLRPs() ([]*models.ActualLRP, error) { + traceID := trace.GenerateTraceID() + actualLRPs, err := b.client.ActualLRPs(b.logger, traceID, models.ActualLRPFilter{}) + + return actualLRPs, err +} diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index 6f838744..d1a86507 100644 --- a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -36,14 +36,16 @@ type CFConfig struct { type Fetcher struct { sync.Mutex - config *CFConfig - worker *Worker + cfConfig *CFConfig + bbsConfig *BBSConfig + worker *Worker } -func NewFetcher(threads int, config *CFConfig, filter *filters.Filter) *Fetcher { +func NewFetcher(threads int, config *CFConfig, bbsConfig *BBSConfig, filter *filters.Filter) *Fetcher { return &Fetcher{ - config: config, - worker: NewWorker(threads, filter), + cfConfig: config, + bbsConfig: bbsConfig, + worker: NewWorker(threads, filter), } } @@ -82,20 +84,27 @@ func (c *Fetcher) workInit() { c.worker.PushIf("service_route_bindings", c.fetchServiceRouteBindings, filters.ServiceRouteBindings) c.worker.PushIf("users", c.fetchUsers, filters.Events) c.worker.PushIf("events", c.fetchEvents, filters.Events) + c.worker.PushIf("actual_lrps", c.fetchActualLRPs) } func (c *Fetcher) fetch() *models.CFObjects { result := models.NewCFObjects() - session, err := NewSessionExt(c.config) + session, err := NewSessionExt(c.cfConfig) if err != nil { log.WithError(err).Error("unable to initialize cloud foundry clients") result.Error = err return result } + 
bbs, err := NewBBSClient(c.bbsConfig) + if err != nil { + log.WithError(err).Error("unable to initialize bbs client") + result.Error = err + return result + } c.workInit() - result.Error = c.worker.Do(session, result) + result.Error = c.worker.Do(session, bbs, result) return result } diff --git a/fetcher/fetcher_handlers.go b/fetcher/fetcher_handlers.go index 87f63657..c5bdad2d 100644 --- a/fetcher/fetcher_handlers.go +++ b/fetcher/fetcher_handlers.go @@ -2,8 +2,11 @@ package fetcher import ( "fmt" + "regexp" "time" + models2 "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" "code.cloudfoundry.org/cli/resources" "github.com/cloudfoundry/cf_exporter/filters" @@ -17,13 +20,40 @@ func loadIndex[T any](store map[string]T, objects []T, key func(T) string) { } } -func (c *Fetcher) fetchInfo(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchActualLRPs(_ *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { + if bbs == nil { + return nil + } + log.Infof("fetching resources from BBS API") + actualLRPs, err := bbs.GetActualLRPs() + if err == nil { + // match first guid as lrps process_guid field contains process_guid and instance_guid "<:process_guid>-<:instance_guid>" + re := regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}") + for idx := 0; idx < len(actualLRPs); idx++ { + processGUID := actualLRPs[idx].ProcessGuid + match := re.FindString(processGUID) + if match != "" { + processGUID = match + } + _, ok := entry.ProcessActualLRPs[processGUID] + if !ok { + entry.ProcessActualLRPs[processGUID] = []*models2.ActualLRP{} + } + entry.ProcessActualLRPs[processGUID] = append(entry.ProcessActualLRPs[processGUID], actualLRPs[idx]) + } + } else { + log.Errorf("could not fetch actual lrps: %s", err) + } + return err +} + +func (c *Fetcher) fetchInfo(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { var err error entry.Info, err = 
session.GetInfo() return err } -func (c *Fetcher) fetchOrgs(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchOrgs(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { orgs, _, err := session.V3().GetOrganizations(LargeQuery) if err == nil { loadIndex(entry.Orgs, orgs, func(r resources.Organization) string { return r.GUID }) @@ -31,7 +61,7 @@ func (c *Fetcher) fetchOrgs(session *SessionExt, entry *models.CFObjects) error return err } -func (c *Fetcher) fetchOrgQuotas(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchOrgQuotas(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { quotas, err := session.GetOrganizationQuotas() if err == nil { loadIndex(entry.OrgQuotas, quotas, func(r models.Quota) string { return r.GUID }) @@ -42,7 +72,7 @@ func (c *Fetcher) fetchOrgQuotas(session *SessionExt, entry *models.CFObjects) e // fetchSpaces // 1. silent fail because space may have been deleted between listing and // summary fetching attempt. 
See cloudfoundry/cf_exporter#85 -func (c *Fetcher) fetchSpaces(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchSpaces(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { spaces, _, _, err := session.V3().GetSpaces(LargeQuery) if err != nil { return err @@ -53,7 +83,7 @@ func (c *Fetcher) fetchSpaces(session *SessionExt, entry *models.CFObjects) erro for idx := 0; idx < total; idx++ { space := spaces[idx] name := fmt.Sprintf("space_summaries %04d/%04d (%s)", idx, total, space.GUID) - c.worker.PushIf(name, func(session *SessionExt, entry *models.CFObjects) error { + c.worker.PushIf(name, func(session *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { spaceSum, err := session.GetSpaceSummary(space.GUID) if err == nil { c.Lock() @@ -73,7 +103,7 @@ func (c *Fetcher) fetchSpaces(session *SessionExt, entry *models.CFObjects) erro return nil } -func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { quotas, err := session.GetSpaceQuotas() if err == nil { loadIndex(entry.SpaceQuotas, quotas, func(r models.Quota) string { return r.GUID }) @@ -81,7 +111,7 @@ func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, entry *models.CFObjects) return err } -func (c *Fetcher) fetchApplications(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchApplications(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { apps, err := session.GetApplications() if err == nil { loadIndex(entry.Apps, apps, func(r models.Application) string { return r.GUID }) @@ -89,7 +119,7 @@ func (c *Fetcher) fetchApplications(session *SessionExt, entry *models.CFObjects return err } -func (c *Fetcher) fetchDomains(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchDomains(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { domains, _, err 
:= session.V3().GetDomains(LargeQuery) if err == nil { loadIndex(entry.Domains, domains, func(r resources.Domain) string { return r.GUID }) @@ -97,7 +127,7 @@ func (c *Fetcher) fetchDomains(session *SessionExt, entry *models.CFObjects) err return err } -func (c *Fetcher) fetchProcesses(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchProcesses(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { processes, _, err := session.V3().GetProcesses(LargeQuery) if err != nil { return err @@ -115,7 +145,7 @@ func (c *Fetcher) fetchProcesses(session *SessionExt, entry *models.CFObjects) e return nil } -func (c *Fetcher) fetchRoutes(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchRoutes(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { routes, _, err := session.V3().GetRoutes(LargeQuery) if err == nil { loadIndex(entry.Routes, routes, func(r resources.Route) string { return r.GUID }) @@ -123,7 +153,7 @@ func (c *Fetcher) fetchRoutes(session *SessionExt, entry *models.CFObjects) erro return err } -func (c *Fetcher) fetchRouteServices(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchRouteServices(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { routes, _, _, err := session.V3().GetRouteBindings(LargeQuery) if err == nil { loadIndex(entry.RoutesBindings, routes, func(r resources.RouteBinding) string { return r.RouteGUID }) @@ -131,7 +161,7 @@ func (c *Fetcher) fetchRouteServices(session *SessionExt, entry *models.CFObject return err } -func (c *Fetcher) fetchSecurityGroups(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchSecurityGroups(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { securitygroups, _, err := session.V3().GetSecurityGroups(LargeQuery) if err == nil { loadIndex(entry.SecurityGroups, securitygroups, func(r resources.SecurityGroup) string { return r.GUID }) @@ -139,7 +169,7 @@ 
func (c *Fetcher) fetchSecurityGroups(session *SessionExt, entry *models.CFObjec return err } -func (c *Fetcher) fetchStacks(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchStacks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { stacks, _, err := session.V3().GetStacks(LargeQuery) if err == nil { loadIndex(entry.Stacks, stacks, func(r resources.Stack) string { return r.GUID }) @@ -147,7 +177,7 @@ func (c *Fetcher) fetchStacks(session *SessionExt, entry *models.CFObjects) erro return err } -func (c *Fetcher) fetchBuildpacks(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchBuildpacks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { buildpacks, _, err := session.V3().GetBuildpacks(LargeQuery) if err == nil { loadIndex(entry.Buildpacks, buildpacks, func(r resources.Buildpack) string { return r.GUID }) @@ -155,7 +185,7 @@ func (c *Fetcher) fetchBuildpacks(session *SessionExt, entry *models.CFObjects) return err } -func (c *Fetcher) fetchTasks(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchTasks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { tasks, err := session.GetTasks() if err == nil { loadIndex(entry.Tasks, tasks, func(r models.Task) string { return r.GUID }) @@ -163,7 +193,7 @@ func (c *Fetcher) fetchTasks(session *SessionExt, entry *models.CFObjects) error return err } -func (c *Fetcher) fetchServiceBrokers(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceBrokers(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { servicebrokers, _, err := session.V3().GetServiceBrokers(LargeQuery) if err == nil { loadIndex(entry.ServiceBrokers, servicebrokers, func(r resources.ServiceBroker) string { return r.GUID }) @@ -171,7 +201,7 @@ func (c *Fetcher) fetchServiceBrokers(session *SessionExt, entry *models.CFObjec return err } -func (c *Fetcher) fetchServiceOfferings(session 
*SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceOfferings(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { serviceofferings, _, err := session.V3().GetServiceOfferings(LargeQuery) if err == nil { loadIndex(entry.ServiceOfferings, serviceofferings, func(r resources.ServiceOffering) string { return r.GUID }) @@ -179,7 +209,7 @@ func (c *Fetcher) fetchServiceOfferings(session *SessionExt, entry *models.CFObj return err } -func (c *Fetcher) fetchServiceInstances(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceInstances(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { serviceinstances, _, _, err := session.V3().GetServiceInstances(LargeQuery) if err == nil { loadIndex(entry.ServiceInstances, serviceinstances, func(r resources.ServiceInstance) string { return r.GUID }) @@ -187,7 +217,7 @@ func (c *Fetcher) fetchServiceInstances(session *SessionExt, entry *models.CFObj return err } -func (c *Fetcher) fetchServicePlans(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServicePlans(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { plans, _, err := session.V3().GetServicePlans() if err == nil { loadIndex(entry.ServicePlans, plans, func(r resources.ServicePlan) string { return r.GUID }) @@ -195,7 +225,7 @@ func (c *Fetcher) fetchServicePlans(session *SessionExt, entry *models.CFObjects return err } -func (c *Fetcher) fetchServiceBindings(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceBindings(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { bindings, _, err := session.V3().GetServiceCredentialBindings(LargeQuery) if err == nil { loadIndex(entry.ServiceBindings, bindings, func(r resources.ServiceCredentialBinding) string { return r.GUID }) @@ -203,7 +233,7 @@ func (c *Fetcher) fetchServiceBindings(session *SessionExt, entry *models.CFObje return err } -func (c *Fetcher) 
fetchServiceRouteBindings(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceRouteBindings(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { routeBindings, _, _, err := session.V3().GetRouteBindings(LargeQuery) if err == nil { loadIndex(entry.ServiceRouteBindings, routeBindings, func(r resources.RouteBinding) string { return r.GUID }) @@ -211,7 +241,7 @@ func (c *Fetcher) fetchServiceRouteBindings(session *SessionExt, entry *models.C return err } -func (c *Fetcher) fetchIsolationSegments(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchIsolationSegments(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { segments, _, err := session.V3().GetIsolationSegments() if err == nil { loadIndex(entry.Segments, segments, func(r resources.IsolationSegment) string { return r.GUID }) @@ -219,7 +249,7 @@ func (c *Fetcher) fetchIsolationSegments(session *SessionExt, entry *models.CFOb return err } -func (c *Fetcher) fetchUsers(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchUsers(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { users, _, err := session.V3().GetUsers(LargeQuery) if err == nil { loadIndex(entry.Users, users, func(r resources.User) string { return r.GUID }) @@ -230,7 +260,7 @@ func (c *Fetcher) fetchUsers(session *SessionExt, entry *models.CFObjects) error // fetchEvents - // 1. create query param "created_ats[gt]=(now - 15min)". There is no point scrapping more // data since the event metric will filter out events older than last scrap. -func (c *Fetcher) fetchEvents(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchEvents(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { // 1. 
location, _ := time.LoadLocation("UTC") since := time.Now().Add(-1 * 15 * time.Minute) diff --git a/fetcher/fetcher_test.go b/fetcher/fetcher_test.go index 7d2a8033..844b9003 100644 --- a/fetcher/fetcher_test.go +++ b/fetcher/fetcher_test.go @@ -19,7 +19,7 @@ var _ = ginkgo.Describe("Fetcher", func() { ginkgo.JustBeforeEach(func() { f, err := filters.NewFilter(active...) gomega.Ω(err).ShouldNot(gomega.HaveOccurred()) - fetcher = NewFetcher(10, &CFConfig{}, f) + fetcher = NewFetcher(10, &CFConfig{}, &BBSConfig{}, f) gomega.Ω(fetcher).ShouldNot(gomega.BeNil()) fetcher.workInit() diff --git a/fetcher/worker.go b/fetcher/worker.go index 481b54ff..04780bc4 100644 --- a/fetcher/worker.go +++ b/fetcher/worker.go @@ -9,7 +9,7 @@ import ( log "github.com/sirupsen/logrus" ) -type WorkHandler func(*SessionExt, *models.CFObjects) error +type WorkHandler func(*SessionExt, *BBSClient, *models.CFObjects) error type Work struct { name string @@ -52,9 +52,9 @@ func (c *Worker) Reset() { c.errs = make(chan error, 1000) } -func (c *Worker) Do(session *SessionExt, result *models.CFObjects) error { +func (c *Worker) Do(session *SessionExt, bbs *BBSClient, result *models.CFObjects) error { for i := 0; i < c.threads; i++ { - go c.run(i, session, result) + go c.run(i, session, bbs, result) } return c.Wait() } @@ -72,7 +72,7 @@ func (c *Worker) Wait() error { return nil } -func (c *Worker) run(id int, session *SessionExt, entry *models.CFObjects) { +func (c *Worker) run(id int, session *SessionExt, bbs *BBSClient, entry *models.CFObjects) { for { work, ok := <-c.list if !ok { @@ -80,7 +80,7 @@ func (c *Worker) run(id int, session *SessionExt, entry *models.CFObjects) { } log.Debugf("[%2d] %s", id, work.name) start := time.Now() - err := work.handler(session, entry) + err := work.handler(session, bbs, entry) duration := time.Since(start) if err != nil { log.Errorf("[%2d] %s error: %s", id, work.name, err) diff --git a/filters/filters.go b/filters/filters.go index b190e8cf..b4d3c247 100644 
--- a/filters/filters.go +++ b/filters/filters.go @@ -22,6 +22,7 @@ const ( Spaces = "spaces" Stacks = "stacks" Tasks = "tasks" + InstancesRunning = "instances_running" ) var ( @@ -68,6 +69,7 @@ func NewFilter(active ...string) (*Filter, error) { Stacks: true, Tasks: false, Events: false, + InstancesRunning: false, }, } @@ -99,6 +101,7 @@ func (f *Filter) setActive(active []string) error { Stacks: false, Tasks: false, Events: false, + InstancesRunning: false, } // enable only given filters @@ -115,7 +118,7 @@ func (f *Filter) setActive(active []string) error { func (f *Filter) Enabled(name string) bool { status, ok := f.activated[name] - return (ok && status) + return ok && status } func (f *Filter) Any(names ...string) bool { diff --git a/go.mod b/go.mod index 7d13df1f..cbf2f22d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/cloudfoundry/cf_exporter go 1.24.1 require ( + code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2 code.cloudfoundry.org/cli v0.0.0-20240609151540-b78406a9b0ce + code.cloudfoundry.org/lager/v3 v3.0.3 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.8.0 github.com/onsi/ginkgo v1.16.5 @@ -15,11 +17,13 @@ require ( require ( code.cloudfoundry.org/bytefmt v0.34.0 // indirect + code.cloudfoundry.org/cfhttp/v2 v2.44.0 // indirect code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 // indirect code.cloudfoundry.org/clock v1.32.0 // indirect code.cloudfoundry.org/go-log-cache/v2 v2.0.7 // indirect code.cloudfoundry.org/go-loggregator/v9 v9.2.1 // indirect code.cloudfoundry.org/jsonry v1.1.4 // indirect + code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 // indirect code.cloudfoundry.org/tlsconfig v0.22.0 // indirect code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect @@ -39,10 +43,15 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // 
indirect github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/go-logr/logr v1.4.2 // indirect + github.com/go-sql-driver/mysql v1.9.2 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-test/deep v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/jackc/pgx/v5 v5.7.4 // indirect github.com/jessevdk/go-flags v1.6.1 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/lunixbochs/vtclean v1.0.0 // indirect @@ -54,6 +63,8 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo/v2 v2.23.4 // indirect + github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect @@ -61,8 +72,10 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/tedsuo/rata v1.0.1-0.20170830210128-07d200713958 // indirect github.com/vito/go-interact v1.0.0 // indirect + github.com/vito/go-sse v1.1.3 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/crypto v0.38.0 // indirect golang.org/x/net v0.40.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect @@ -70,6 +83,7 @@ require ( golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.25.0 // indirect golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.32.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // 
indirect google.golang.org/grpc v1.71.0 // indirect diff --git a/go.sum b/go.sum index c49129c7..57ee6d69 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,9 @@ +code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2 h1:r09wLVYq8X+X1XAJcXgizAdyjptm/tZS4ylWZCg4hzs= +code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2/go.mod h1:XKlGVVXFi5EcHHMPzw3xgONK9PeEZuUbIC43XNwxD10= code.cloudfoundry.org/bytefmt v0.34.0 h1:ErjbaXWjKm6BwdataJUrQxOdtR3uvUoKIXlMaA/smV0= code.cloudfoundry.org/bytefmt v0.34.0/go.mod h1:U3iZQ5YdK0/0QVYYW+QMLXGjBNXfmZmKt/J2V1eKHvc= +code.cloudfoundry.org/cfhttp/v2 v2.44.0 h1:SHb2oWRrMEigXQCfiXwmSkSZLZSZ+ua0AVbavVcefAU= +code.cloudfoundry.org/cfhttp/v2 v2.44.0/go.mod h1:OYSxfFKC0HY7cbeXh2iQVcp4HnbucPBa0naTkOxzKZk= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 h1:Yc9r1p21kEpni9WlG4mwOZw87TB2QlyS9sAEebZ3+ak= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6/go.mod h1:u5FovqC5GGAEbFPz+IdjycDA+gIjhUwqxnu0vbHwVeM= code.cloudfoundry.org/cli v0.0.0-20240609151540-b78406a9b0ce h1:Lg/u08txpuLjC7FQ4Q7DtQB2VZbmCXBbgyZbmBiwvI8= @@ -20,6 +24,8 @@ code.cloudfoundry.org/jsonry v1.1.4 h1:P9N7IlH1/4aRCLcXLgLFj1hkcBmV7muijJzY+K6U4 code.cloudfoundry.org/jsonry v1.1.4/go.mod h1:6aKilShQP7w/Ez76h1El2/n9y2OkHuU56nKSBB9Gp0A= code.cloudfoundry.org/lager/v3 v3.0.3 h1:/UTmadZfIaKuT/whEinSxK1mzRfNu1uPfvjFfGqiwzM= code.cloudfoundry.org/lager/v3 v3.0.3/go.mod h1:Zn5q1SrIuuHjEUE7xerMKt3ztunrJQCZETAo7rV0CH8= +code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 h1:m6Zwwr6HjmdXS/EGwIhar0N6ExQZvmqYSC23MNE+5jc= +code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201/go.mod h1:AwHLRkdXtttLXNB8RHgLfErJ2kKafH62AR2OClhy6xI= code.cloudfoundry.org/tlsconfig v0.22.0 h1:zgzDd4lp++vov8azKP1LdAOBEViPRwm1lg67FHJ4W7Q= code.cloudfoundry.org/tlsconfig v0.22.0/go.mod h1:RX++v+3sJ7bCv0rhFAMryoDAeQeGWJv7bXdwK7DfIzo= code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d 
h1:M+zXqtXJqcsmpL76aU0tdl1ho23eYa4axYoM4gD62UA= @@ -86,10 +92,13 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -119,14 +128,22 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= -github.com/google/pprof 
v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 h1:gD0vax+4I+mAj+jEChEf25Ia07Jq7kYOFO5PPhAxFl4= +github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= +github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -143,8 +160,9 @@ github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3x github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= @@ -178,8 +196,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -194,6 +212,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= @@ -222,10 +242,14 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tedsuo/ifrit v0.0.0-20230516164442-7862c310ad26 h1:mWCRvpoEMVlslxEvvptKgIUb35va9yj9Oq5wGw/er5I= +github.com/tedsuo/ifrit v0.0.0-20230516164442-7862c310ad26/go.mod h1:0uD3VMXkZ7Bw0ojGCwDzebBBzPBXtzEZeXai+56BLX4= github.com/tedsuo/rata v1.0.1-0.20170830210128-07d200713958 h1:mueRRuRjR35dEOkHdhpoRcruNgBz0ohG659HxxmcAwA= github.com/tedsuo/rata v1.0.1-0.20170830210128-07d200713958/go.mod h1:X47ELzhOoLbfFIY0Cql9P6yo3Cdwf2CMX3FVZxRzJPc= github.com/vito/go-interact v1.0.0 h1:niLW3NjGoMWOayoR6iQ8AxWVM1Q4rR8VGZ1mt6uK3BM= github.com/vito/go-interact v1.0.0/go.mod h1:W1mz+UVUZScRM3eUjQhEQiLDnQ+yLnXkB2rjBfGPrXg= +github.com/vito/go-sse v1.1.3 h1:tZOiC+xKmuRPoySTupO6liK7ciSX0B2NKXha5hEtUAE= +github.com/vito/go-sse v1.1.3/go.mod h1:kMjgO+XCwBS0se2X/aS5T4aLBvHWAKOUqZOqXKdyrAg= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 
v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= @@ -247,6 +271,8 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.step.sm/crypto v0.59.2 h1:pW6qsBW03hd/OFHi2j7hF4sWnEJ0u7rn3reDSqPb46c= go.step.sm/crypto v0.59.2/go.mod h1:bPwUACtYU1CR5ohZTetjBz9CfyF9qql3LllAjw+t3rs= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -278,6 +304,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -320,8 +348,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/main.go b/main.go index 7f2862b7..d620ba7b 100644 --- a/main.go +++ b/main.go @@ -18,6 +18,30 @@ import ( ) var ( + bbsAPIUrl = kingpin.Flag( + "bbs.api_url", "BBS API URL ($CF_EXPORTER_BBS_API_URL)", + ).Envar("CF_EXPORTER_BBS_API_URL").String() + + bbsTimeout = kingpin.Flag( + "bbs.timeout", "BBS API Timeout ($CF_EXPORTER_BBS_TIMEOUT)", + ).Envar("CF_EXPORTER_BBS_TIMEOUT").Default("10").Int() + + bbsCAFile = kingpin.Flag( + "bbs.ca_file", "BBS CA File ($CF_EXPORTER_BBS_CA_FILE)", + ).Envar("CF_EXPORTER_BBS_CA_FILE").Default("").String() + + bbsCertFile = kingpin.Flag( + "bbs.cert_file", "BBS Cert File ($CF_EXPORTER_BBS_CERT_FILE)", + ).Envar("CF_EXPORTER_BBS_CERT_FILE").Default("").String() + + bbsKeyFile = kingpin.Flag( + "bbs.key_file", "BBS Key File ($CF_EXPORTER_BBS_KEY_FILE)", + ).Envar("CF_EXPORTER_BBS_KEY_FILE").String() + + bbsSkipSSLValidation = kingpin.Flag( + "bbs.skip_ssl_verify", "Disable SSL Verify for BBS ($CF_EXPORTER_BBS_SKIP_SSL_VERIFY)", + 
).Envar("CF_EXPORTER_BBS_SKIP_SSL_VERIFY").Default("false").Bool() + cfAPIUrl = kingpin.Flag( "cf.api_url", "Cloud Foundry API URL ($CF_EXPORTER_CF_API_URL)", ).Envar("CF_EXPORTER_CF_API_URL").String() @@ -43,7 +67,7 @@ var ( ).Envar("CF_EXPORTER_CF_DEPLOYMENT_NAME").Required().String() filterCollectors = kingpin.Flag( - "filter.collectors", "Comma separated collectors to filter (Applications,Buildpacks,Events,IsolationSegments,Organizations,Routes,SecurityGroups,ServiceBindings,ServiceInstances,ServicePlans,Services,Spaces,Stacks,Tasks). If not set, all collectors except Events and Tasks are enabled ($CF_EXPORTER_FILTER_COLLECTORS)", + "filter.collectors", "Comma separated collectors to filter (Applications,Buildpacks,Events,IsolationSegments,Organizations,Routes,SecurityGroups,ServiceBindings,ServiceInstances,ServicePlans,Services,Spaces,Stacks,Tasks,ActualLRPs). If not set, all collectors except Events and Tasks are enabled ($CF_EXPORTER_FILTER_COLLECTORS)", ).Envar("CF_EXPORTER_FILTER_COLLECTORS").Default("").String() metricsNamespace = kingpin.Flag( @@ -153,7 +177,7 @@ func main() { } log.SetLevel(lvl) - config := &fetcher.CFConfig{ + cfConfig := &fetcher.CFConfig{ URL: *cfAPIUrl, Username: *cfUsername, Password: *cfPassword, @@ -162,6 +186,18 @@ func main() { SkipSSLValidation: *skipSSLValidation, } + bbsConfig := &fetcher.BBSConfig{ + URL: *bbsAPIUrl, + Timeout: *bbsTimeout, + CAFile: *bbsCAFile, + CertFile: *bbsCertFile, + KeyFile: *bbsKeyFile, + SkipCertVerify: *bbsSkipSSLValidation, + } + + log.Infof("cfConfig: %+v", cfConfig) + log.Infof("bbsConfig: %+v", bbsConfig) + active := []string{} if len(*filterCollectors) != 0 { active = strings.Split(*filterCollectors, ",") @@ -172,7 +208,7 @@ func main() { os.Exit(1) } - c, err := collectors.NewCollector(*metricsNamespace, *metricsEnvironment, *cfDeploymentName, *workers, config, filter) + c, err := collectors.NewCollector(*metricsNamespace, *metricsEnvironment, *cfDeploymentName, *workers, cfConfig, 
bbsConfig, filter) if err != nil { log.Error(err) os.Exit(1) diff --git a/models/model.go b/models/model.go index ffda133a..c7093fbe 100644 --- a/models/model.go +++ b/models/model.go @@ -3,6 +3,8 @@ package models import ( "time" + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" "code.cloudfoundry.org/cli/resources" "code.cloudfoundry.org/cli/types" @@ -32,6 +34,7 @@ type CFObjects struct { SpaceSummaries map[string]SpaceSummary `json:"space_summaries"` AppSummaries map[string]AppSummary `json:"app_summaries"` AppProcesses map[string][]resources.Process `json:"app_processes"` + ProcessActualLRPs map[string][]*models.ActualLRP `json:"process_actual_lrps"` Events map[string]Event `json:"events"` Users map[string]resources.User `json:"users"` ServiceRouteBindings map[string]resources.RouteBinding `json:"service_route_bindings"` @@ -173,6 +176,7 @@ func NewCFObjects() *CFObjects { SpaceSummaries: map[string]SpaceSummary{}, AppSummaries: map[string]AppSummary{}, AppProcesses: map[string][]resources.Process{}, + ProcessActualLRPs: map[string][]*models.ActualLRP{}, Users: map[string]resources.User{}, Events: map[string]Event{}, ServiceRouteBindings: map[string]resources.RouteBinding{}, diff --git a/vendor/code.cloudfoundry.org/bbs/.gitignore b/vendor/code.cloudfoundry.org/bbs/.gitignore new file mode 100644 index 00000000..008a3fc6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/.gitignore @@ -0,0 +1,3 @@ +*.coverprofile +*.test +cmd/bbs/bbs diff --git a/vendor/code.cloudfoundry.org/bbs/CODEOWNERS b/vendor/code.cloudfoundry.org/bbs/CODEOWNERS new file mode 100644 index 00000000..6a633c7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/CODEOWNERS @@ -0,0 +1 @@ +* @cloudfoundry/wg-app-runtime-platform-diego-approvers diff --git a/vendor/code.cloudfoundry.org/bbs/LICENSE b/vendor/code.cloudfoundry.org/bbs/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ 
b/vendor/code.cloudfoundry.org/bbs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/bbs/NOTICE b/vendor/code.cloudfoundry.org/bbs/NOTICE new file mode 100644 index 00000000..5f623629 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/NOTICE @@ -0,0 +1,18 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/bbs/README.md b/vendor/code.cloudfoundry.org/bbs/README.md new file mode 100644 index 00000000..07374052 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/README.md @@ -0,0 +1,60 @@ +# BBS + +[![Go Report +Card](https://goreportcard.com/badge/code.cloudfoundry.org/bbs)](https://goreportcard.com/report/code.cloudfoundry.org/bbs) +[![Go +Reference](https://pkg.go.dev/badge/code.cloudfoundry.org/bbs.svg)](https://pkg.go.dev/code.cloudfoundry.org/bbs) + +Bulletin Board System (BBS) is the API to access the database for Diego. +It communicates via protocol-buffer-encoded RPC-style calls over HTTP. + +Diego clients communicate with the BBS via an +[ExternalClient](https://godoc.org/github.com/cloudfoundry/bbs#ExternalClient) +interface. This interface allows clients to create, read, update, +delete, and subscribe to events about Tasks and LRPs. + +> \[!NOTE\] +> +> This repository should be imported as `code.cloudfoundry.org/bbs`. 
+ +# Docs + +- [BBS API Overview](./docs/010-overview.md) +- [The components of a Diego Cell overview](./docs/011-cells.md) +- [Cells API](./docs/012-api-cells.md) +- [Overview of Tasks](./docs/020-tasks.md) +- [Defining Tasks](./docs/021-defining-tasks.md) +- [Task Examples](./docs/022-task-examples.md) +- [Tasks API](./docs/023-api-tasks.md) +- [Tasks Internal API](./docs/024-api-tasks-internal.md) +- [Overview of LRPs: Long Running Processes](./docs/030-lrps.md) +- [Defining LRPs](./docs/031-defining-lrps.md) +- [LRP Examples](./docs/032-lrp-examples.md) +- [LRP API Reference](./docs/033-api-lrps.md) +- [Actual LRPs Internal API](./docs/034-api-lrps-internal.md) +- [BBS DB Schema](./docs/040-schema-description.md) +- [BBS API Versioning + Conventions](./docs/041-revisioning-bbs-api-endpoints.md) +- [BBS Migrations](./docs/042-bbs-migration.md) +- [Domains](./docs/050-domains.md) +- [Container Runtime Environment Variables](./docs/051-environment.md) +- [BBS Events](./docs/052-events.md) +- [Actions](./docs/053-actions.md) +- [BBS Models](./docs/054-common-models.md) + +# Contributing + +See the [Contributing.md](./.github/CONTRIBUTING.md) for more +information on how to contribute. + +# Working Group Charter + +This repository is maintained by [App Runtime +Platform](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/app-runtime-platform.md) +under `Diego` area. + +> \[!IMPORTANT\] +> +> Content in this file is managed by the [CI task +> `sync-readme`](https://github.com/cloudfoundry/wg-app-platform-runtime-ci/blob/main/shared/tasks/sync-readme/metadata.yml) +> and is generated by CI following a convention. 
diff --git a/vendor/code.cloudfoundry.org/bbs/client.go b/vendor/code.cloudfoundry.org/bbs/client.go new file mode 100644 index 00000000..c210bdd5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/client.go @@ -0,0 +1,1019 @@ +package bbs + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "mime" + "net" + "net/http" + "net/url" + "time" + + "code.cloudfoundry.org/bbs/events" + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/bbs/trace" + cfhttp "code.cloudfoundry.org/cfhttp/v2" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/tlsconfig" + "github.com/gogo/protobuf/proto" + "github.com/tedsuo/rata" + "github.com/vito/go-sse/sse" +) + +const ( + ContentTypeHeader = "Content-Type" + XCfRouterErrorHeader = "X-Cf-Routererror" + ProtoContentType = "application/x-protobuf" + KeepContainer = true + DeleteContainer = false + DefaultRetryCount = 3 + + InvalidResponseMessage = "Invalid Response with status code: %d" +) + +var EndpointNotFoundErr = models.NewError(models.Error_InvalidResponse, fmt.Sprintf(InvalidResponseMessage, 404)) + +//go:generate counterfeiter -generate + +//counterfeiter:generate -o fake_bbs/fake_internal_client.go . InternalClient +//counterfeiter:generate -o fake_bbs/fake_client.go . Client + +/* +The InternalClient interface exposes all available endpoints of the BBS server, +including private endpoints which should be used exclusively by internal Diego +components. To interact with the BBS from outside of Diego, the Client +should be used instead. 
+*/ +type InternalClient interface { + Client + + ClaimActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error + StartActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, netInfo *models.ActualLRPNetInfo, internalRoutes []*models.ActualLRPInternalRoute, metricTags map[string]string, routable bool, availabilityZone string) error + CrashActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) error + FailActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, errorMessage string) error + RemoveActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error + + EvacuateClaimedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) + EvacuateRunningActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, netInfo *models.ActualLRPNetInfo, internalRoutes []*models.ActualLRPInternalRoute, metricTags map[string]string, routable bool, availabilityZone string) (bool, error) + EvacuateStoppedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) + EvacuateCrashedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (bool, error) + RemoveEvacuatingActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error + + StartTask(logger lager.Logger, traceID string, taskGuid string, cellID string) (bool, error) + FailTask(logger lager.Logger, traceID string, taskGuid, failureReason string) error + RejectTask(logger lager.Logger, traceID string, taskGuid, failureReason 
string) error + CompleteTask(logger lager.Logger, traceID string, taskGuid, cellId string, failed bool, failureReason, result string) error +} + +/* +The External InternalClient can be used to access the BBS's public functionality. +It exposes methods for basic LRP and Task Lifecycles, Domain manipulation, and +event subscription. +*/ +type Client interface { + ExternalTaskClient + ExternalDomainClient + ExternalActualLRPClient + ExternalDesiredLRPClient + ExternalEventClient + + // Returns true if the BBS server is reachable + Ping(logger lager.Logger, traceID string) bool + + // Lists all Cells + Cells(logger lager.Logger, traceID string) ([]*models.CellPresence, error) +} + +/* +The ExternalTaskClient is used to access Diego's ability to run one-off tasks. +More information about this API can be found in the bbs docs: + +https://code.cloudfoundry.org/bbs/tree/master/doc/tasks.md +*/ +type ExternalTaskClient interface { + // Creates a Task from the given TaskDefinition + DesireTask(logger lager.Logger, traceID string, guid string, domain string, def *models.TaskDefinition) error + + // Lists all Tasks + Tasks(logger lager.Logger, traceID string) ([]*models.Task, error) + + // List all Tasks that match filter + TasksWithFilter(logger lager.Logger, traceID string, filter models.TaskFilter) ([]*models.Task, error) + + // Lists all Tasks of the given domain + TasksByDomain(logger lager.Logger, traceID string, domain string) ([]*models.Task, error) + + // Lists all Tasks on the given cell + TasksByCellID(logger lager.Logger, traceID string, cellId string) ([]*models.Task, error) + + // Returns the Task with the given guid + TaskByGuid(logger lager.Logger, traceID string, guid string) (*models.Task, error) + + // Cancels the Task with the given task guid + CancelTask(logger lager.Logger, traceID string, taskGuid string) error + + // Resolves a Task with the given guid + ResolvingTask(logger lager.Logger, traceID string, taskGuid string) error + + // Deletes a completed 
task with the given guid + DeleteTask(logger lager.Logger, traceID string, taskGuid string) error +} + +/* +The ExternalDomainClient is used to access and update Diego's domains. +*/ +type ExternalDomainClient interface { + // Lists the active domains + Domains(logger lager.Logger, traceID string) ([]string, error) + + // Creates a domain or bumps the ttl on an existing domain + UpsertDomain(logger lager.Logger, traceID string, domain string, ttl time.Duration) error +} + +/* +The ExternalActualLRPClient is used to access and retire Actual LRPs +*/ +type ExternalActualLRPClient interface { + // Returns all ActualLRPs matching the given ActualLRPFilter + ActualLRPs(lager.Logger, string, models.ActualLRPFilter) ([]*models.ActualLRP, error) + + // Returns all ActualLRPGroups matching the given ActualLRPFilter + //lint:ignore SA1019 - deprecated function returning deprecated data + // Deprecated: use ActualLRPs instead + ActualLRPGroups(lager.Logger, string, models.ActualLRPFilter) ([]*models.ActualLRPGroup, error) + + // Returns all ActualLRPGroups that have the given process guid + //lint:ignore SA1019 - deprecated function returning deprecated data + // Deprecated: use ActualLRPs instead + ActualLRPGroupsByProcessGuid(logger lager.Logger, traceID string, processGuid string) ([]*models.ActualLRPGroup, error) + + // Returns the ActualLRPGroup with the given process guid and instance index + //lint:ignore SA1019 - deprecated function returning deprecated data + // Deprecated: use ActualLRPs instead + ActualLRPGroupByProcessGuidAndIndex(logger lager.Logger, traceID string, processGuid string, index int) (*models.ActualLRPGroup, error) + + // Shuts down the ActualLRP matching the given ActualLRPKey, but does not modify the desired state + RetireActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey) error +} + +/* +The ExternalDesiredLRPClient is used to access and manipulate Desired LRPs. 
+*/ +type ExternalDesiredLRPClient interface { + // Lists all DesiredLRPs that match the given DesiredLRPFilter + DesiredLRPs(lager.Logger, string, models.DesiredLRPFilter) ([]*models.DesiredLRP, error) + + // Returns the DesiredLRP with the given process guid + DesiredLRPByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRP, error) + + // Returns all DesiredLRPSchedulingInfos that match the given DesiredLRPFilter + DesiredLRPSchedulingInfos(lager.Logger, string, models.DesiredLRPFilter) ([]*models.DesiredLRPSchedulingInfo, error) + + //Returns the DesiredLRPSchedulingInfo that matches the given process guid + DesiredLRPSchedulingInfoByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRPSchedulingInfo, error) + + // Returns all DesiredLRPRoutingInfos that match the given DesiredLRPFilter + DesiredLRPRoutingInfos(lager.Logger, string, models.DesiredLRPFilter) ([]*models.DesiredLRP, error) + + // Creates the given DesiredLRP and its corresponding ActualLRPs + DesireLRP(lager.Logger, string, *models.DesiredLRP) error + + // Updates the DesiredLRP matching the given process guid + UpdateDesiredLRP(logger lager.Logger, traceID string, processGuid string, update *models.DesiredLRPUpdate) error + + // Removes the DesiredLRP matching the given process guid + RemoveDesiredLRP(logger lager.Logger, traceID string, processGuid string) error +} + +/* +The ExternalEventClient is used to subscribe to groups of Events. 
+*/ +type ExternalEventClient interface { + // Deprecated: use SubscribeToInstanceEvents instead + SubscribeToEvents(logger lager.Logger) (events.EventSource, error) + + SubscribeToInstanceEvents(logger lager.Logger) (events.EventSource, error) + SubscribeToTaskEvents(logger lager.Logger) (events.EventSource, error) + + // Deprecated: use SubscribeToInstanceEventsByCellID instead + SubscribeToEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) + + SubscribeToInstanceEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) +} + +type ClientConfig struct { + URL string + IsTLS bool + CAFile string + CertFile string + KeyFile string + ClientSessionCacheSize int + MaxIdleConnsPerHost int + InsecureSkipVerify bool + Retries int + RetryInterval time.Duration // Only affects streaming client, not the http client + RequestTimeout time.Duration // Only affects the http client, not the streaming client +} + +func NewClient(url, caFile, certFile, keyFile string, clientSessionCacheSize, maxIdleConnsPerHost int) (InternalClient, error) { + return NewClientWithConfig(ClientConfig{ + URL: url, + IsTLS: true, + CAFile: caFile, + CertFile: certFile, + KeyFile: keyFile, + ClientSessionCacheSize: clientSessionCacheSize, + MaxIdleConnsPerHost: maxIdleConnsPerHost, + }) +} + +func NewSecureSkipVerifyClient(url, certFile, keyFile string, clientSessionCacheSize, maxIdleConnsPerHost int) (InternalClient, error) { + return NewClientWithConfig(ClientConfig{ + URL: url, + IsTLS: true, + CAFile: "", + CertFile: certFile, + KeyFile: keyFile, + ClientSessionCacheSize: clientSessionCacheSize, + MaxIdleConnsPerHost: maxIdleConnsPerHost, + InsecureSkipVerify: true, + }) +} + +func NewClientWithConfig(cfg ClientConfig) (InternalClient, error) { + if cfg.Retries == 0 { + cfg.Retries = DefaultRetryCount + } + + if cfg.RetryInterval == 0 { + cfg.RetryInterval = time.Second + } + + if cfg.InsecureSkipVerify { + cfg.CAFile = "" + } + + if cfg.IsTLS { + 
return newSecureClient(cfg) + } else { + return newClient(cfg), nil + } +} + +func newClient(cfg ClientConfig) *client { + return &client{ + httpClient: cfhttp.NewClient(cfhttp.WithRequestTimeout(cfg.RequestTimeout)), + streamingHTTPClient: cfhttp.NewClient(cfhttp.WithStreamingDefaults()), + reqGen: rata.NewRequestGenerator(cfg.URL, Routes), + requestRetryCount: cfg.Retries, + retryInterval: cfg.RetryInterval, + } +} +func newSecureClient(cfg ClientConfig) (InternalClient, error) { + bbsURL, err := url.Parse(cfg.URL) + if err != nil { + return nil, err + } + if bbsURL.Scheme != "https" { + return nil, errors.New("Expected https URL") + } + + var clientOpts []tlsconfig.ClientOption + if !cfg.InsecureSkipVerify { + clientOpts = append(clientOpts, tlsconfig.WithAuthorityFromFile(cfg.CAFile)) + } + + tlsConfig, err := tlsconfig.Build( + tlsconfig.WithInternalServiceDefaults(), + tlsconfig.WithIdentityFromFile(cfg.CertFile, cfg.KeyFile), + ).Client(clientOpts...) + if err != nil { + return nil, err + } + tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(cfg.ClientSessionCacheSize) + + tlsConfig.InsecureSkipVerify = cfg.InsecureSkipVerify + + httpClient := cfhttp.NewClient( + cfhttp.WithRequestTimeout(cfg.RequestTimeout), + cfhttp.WithTLSConfig(tlsConfig), + cfhttp.WithMaxIdleConnsPerHost(cfg.MaxIdleConnsPerHost), + ) + streamingClient := cfhttp.NewClient( + cfhttp.WithStreamingDefaults(), + cfhttp.WithTLSConfig(tlsConfig), + cfhttp.WithMaxIdleConnsPerHost(cfg.MaxIdleConnsPerHost), + ) + + return &client{ + httpClient: httpClient, + streamingHTTPClient: streamingClient, + reqGen: rata.NewRequestGenerator(cfg.URL, Routes), + requestRetryCount: cfg.Retries, + retryInterval: cfg.RetryInterval, + }, nil +} + +type client struct { + httpClient *http.Client + streamingHTTPClient *http.Client + reqGen *rata.RequestGenerator + requestRetryCount int + retryInterval time.Duration +} + +func (c *client) Ping(logger lager.Logger, traceID string) bool { + response := 
models.PingResponse{} + err := c.doRequest(logger, traceID, PingRoute_r0, nil, nil, nil, &response) + if err != nil { + return false + } + return response.Available +} + +func (c *client) Domains(logger lager.Logger, traceID string) ([]string, error) { + response := models.DomainsResponse{} + err := c.doRequest(logger, traceID, DomainsRoute_r0, nil, nil, nil, &response) + if err != nil { + return nil, err + } + return response.Domains, response.Error.ToError() +} + +func (c *client) UpsertDomain(logger lager.Logger, traceID string, domain string, ttl time.Duration) error { + request := models.UpsertDomainRequest{ + Domain: domain, + Ttl: uint32(ttl.Seconds()), + } + response := models.UpsertDomainResponse{} + err := c.doRequest(logger, traceID, UpsertDomainRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) ActualLRPs(logger lager.Logger, traceID string, filter models.ActualLRPFilter) ([]*models.ActualLRP, error) { + request := models.ActualLRPsRequest{ + Domain: filter.Domain, + CellId: filter.CellID, + ProcessGuid: filter.ProcessGuid, + } + if filter.Index != nil { + request.SetIndex(*filter.Index) + } + response := models.ActualLRPsResponse{} + err := c.doRequest(logger, traceID, ActualLRPsRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrps, response.Error.ToError() +} + +// Deprecated: use ActualLRPs instead +func (c *client) ActualLRPGroups(logger lager.Logger, traceID string, filter models.ActualLRPFilter) ([]*models.ActualLRPGroup, error) { + request := models.ActualLRPGroupsRequest{ + Domain: filter.Domain, + CellId: filter.CellID, + } + response := models.ActualLRPGroupsResponse{} + err := c.doRequest(logger, traceID, ActualLRPGroupsRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrpGroups, response.Error.ToError() +} + +// Deprecated: use ActaulLRPs instead 
+func (c *client) ActualLRPGroupsByProcessGuid(logger lager.Logger, traceID string, processGuid string) ([]*models.ActualLRPGroup, error) { + request := models.ActualLRPGroupsByProcessGuidRequest{ + ProcessGuid: processGuid, + } + response := models.ActualLRPGroupsResponse{} + err := c.doRequest(logger, traceID, ActualLRPGroupsByProcessGuidRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrpGroups, response.Error.ToError() +} + +// Deprecated: use ActaulLRPs instead +func (c *client) ActualLRPGroupByProcessGuidAndIndex(logger lager.Logger, traceID string, processGuid string, index int) (*models.ActualLRPGroup, error) { + request := models.ActualLRPGroupByProcessGuidAndIndexRequest{ + ProcessGuid: processGuid, + Index: int32(index), + } + response := models.ActualLRPGroupResponse{} + err := c.doRequest(logger, traceID, ActualLRPGroupByProcessGuidAndIndexRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrpGroup, response.Error.ToError() +} + +func (c *client) ClaimActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error { + request := models.ClaimActualLRPRequest{ + ProcessGuid: key.ProcessGuid, + Index: key.Index, + ActualLrpInstanceKey: instanceKey, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, ClaimActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) StartActualLRP(logger lager.Logger, + traceID string, + key *models.ActualLRPKey, + instanceKey *models.ActualLRPInstanceKey, + netInfo *models.ActualLRPNetInfo, + internalRoutes []*models.ActualLRPInternalRoute, + metricTags map[string]string, + routable bool, + availabilityZone string, +) error { + response := models.ActualLRPLifecycleResponse{} + request := &models.StartActualLRPRequest{ + ActualLrpKey: 
key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + ActualLrpInternalRoutes: internalRoutes, + MetricTags: metricTags, + AvailabilityZone: availabilityZone, + } + request.SetRoutable(routable) + err := c.doRequest(logger, traceID, StartActualLRPRoute_r1, nil, nil, request, &response) + if err != nil && err == EndpointNotFoundErr { + err = c.doRequest(logger, traceID, StartActualLRPRoute_r0, nil, nil, &models.StartActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + }, &response) + } + + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) CrashActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) error { + request := models.CrashActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ErrorMessage: errorMessage, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, CrashActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + + } + return response.Error.ToError() +} + +func (c *client) FailActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, errorMessage string) error { + request := models.FailActualLRPRequest{ + ActualLrpKey: key, + ErrorMessage: errorMessage, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, FailActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + + } + return response.Error.ToError() +} + +func (c *client) RetireActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey) error { + request := models.RetireActualLRPRequest{ + ActualLrpKey: key, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, RetireActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + + } + return response.Error.ToError() +} + +func 
(c *client) RemoveActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error { + request := models.RemoveActualLRPRequest{ + ProcessGuid: key.ProcessGuid, + Index: key.Index, + ActualLrpInstanceKey: instanceKey, + } + + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, RemoveActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) EvacuateClaimedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) { + return c.doEvacRequest(logger, traceID, EvacuateClaimedActualLRPRoute_r0, KeepContainer, &models.EvacuateClaimedActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + }) +} + +func (c *client) EvacuateCrashedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (bool, error) { + return c.doEvacRequest(logger, traceID, EvacuateCrashedActualLRPRoute_r0, DeleteContainer, &models.EvacuateCrashedActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ErrorMessage: errorMessage, + }) +} + +func (c *client) EvacuateStoppedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) { + return c.doEvacRequest(logger, traceID, EvacuateStoppedActualLRPRoute_r0, DeleteContainer, &models.EvacuateStoppedActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + }) +} + +func (c *client) EvacuateRunningActualLRP(logger lager.Logger, + traceID string, + key *models.ActualLRPKey, + instanceKey *models.ActualLRPInstanceKey, + netInfo *models.ActualLRPNetInfo, + internalRoutes []*models.ActualLRPInternalRoute, + metricTags map[string]string, + routable bool, + availabilityZone string, +) (bool, error) { + request := 
&models.EvacuateRunningActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + ActualLrpInternalRoutes: internalRoutes, + MetricTags: metricTags, + AvailabilityZone: availabilityZone, + } + request.SetRoutable(routable) + keepContainer, err := c.doEvacRequest(logger, traceID, EvacuateRunningActualLRPRoute_r1, KeepContainer, request) + if err != nil && err == EndpointNotFoundErr { + keepContainer, err = c.doEvacRequest(logger, traceID, EvacuateRunningActualLRPRoute_r0, KeepContainer, &models.EvacuateRunningActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + }) + } + + return keepContainer, err +} + +func (c *client) RemoveEvacuatingActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error { + request := models.RemoveEvacuatingActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + } + + response := models.RemoveEvacuatingActualLRPResponse{} + err := c.doRequest(logger, traceID, RemoveEvacuatingActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + + return response.Error.ToError() +} + +func (c *client) DesiredLRPs(logger lager.Logger, traceID string, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) { + request := models.DesiredLRPsRequest(filter) + response := models.DesiredLRPsResponse{} + err := c.doRequest(logger, traceID, DesiredLRPsRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrps, response.Error.ToError() +} + +func (c *client) DesiredLRPByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRP, error) { + request := models.DesiredLRPByProcessGuidRequest{ + ProcessGuid: processGuid, + } + response := models.DesiredLRPResponse{} + err := c.doRequest(logger, traceID, DesiredLRPByProcessGuidRoute_r3, nil, nil, &request, &response) + if 
err != nil { + return nil, err + } + + return response.DesiredLrp, response.Error.ToError() +} + +func (c *client) DesiredLRPSchedulingInfos(logger lager.Logger, traceID string, filter models.DesiredLRPFilter) ([]*models.DesiredLRPSchedulingInfo, error) { + request := models.DesiredLRPsRequest(filter) + response := models.DesiredLRPSchedulingInfosResponse{} + err := c.doRequest(logger, traceID, DesiredLRPSchedulingInfosRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrpSchedulingInfos, response.Error.ToError() +} + +func (c *client) DesiredLRPSchedulingInfoByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRPSchedulingInfo, error) { + request := models.DesiredLRPByProcessGuidRequest{ + ProcessGuid: processGuid, + } + response := models.DesiredLRPSchedulingInfoByProcessGuidResponse{} + err := c.doRequest(logger, traceID, DesiredLRPSchedulingInfoByProcessGuid_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrpSchedulingInfo, response.Error.ToError() +} + +func (c *client) DesiredLRPRoutingInfos(logger lager.Logger, traceID string, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) { + request := models.DesiredLRPsRequest(filter) + response := models.DesiredLRPsResponse{} + err := c.doRequest(logger, traceID, DesiredLRPRoutingInfosRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrps, response.Error.ToError() +} + +func (c *client) doDesiredLRPLifecycleRequest(logger lager.Logger, traceID string, route string, request proto.Message) error { + response := models.DesiredLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, route, nil, nil, request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) DesireLRP(logger lager.Logger, traceID string, desiredLRP *models.DesiredLRP) error { + request := 
models.DesireLRPRequest{ + DesiredLrp: desiredLRP, + } + return c.doDesiredLRPLifecycleRequest(logger, traceID, DesireDesiredLRPRoute_r2, &request) +} + +func (c *client) UpdateDesiredLRP(logger lager.Logger, traceID string, processGuid string, update *models.DesiredLRPUpdate) error { + request := models.UpdateDesiredLRPRequest{ + ProcessGuid: processGuid, + Update: update, + } + return c.doDesiredLRPLifecycleRequest(logger, traceID, UpdateDesiredLRPRoute_r0, &request) +} + +func (c *client) RemoveDesiredLRP(logger lager.Logger, traceID string, processGuid string) error { + request := models.RemoveDesiredLRPRequest{ + ProcessGuid: processGuid, + } + return c.doDesiredLRPLifecycleRequest(logger, traceID, RemoveDesiredLRPRoute_r0, &request) +} + +func (c *client) Tasks(logger lager.Logger, traceID string) ([]*models.Task, error) { + request := models.TasksRequest{} + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Tasks, response.Error.ToError() +} + +func (c *client) TasksWithFilter(logger lager.Logger, traceID string, filter models.TaskFilter) ([]*models.Task, error) { + request := models.TasksRequest{ + Domain: filter.Domain, + CellId: filter.CellID, + } + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + return response.Tasks, response.Error.ToError() +} + +func (c *client) TasksByDomain(logger lager.Logger, traceID string, domain string) ([]*models.Task, error) { + request := models.TasksRequest{ + Domain: domain, + } + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Tasks, response.Error.ToError() +} + +func (c *client) TasksByCellID(logger lager.Logger, traceID string, cellId string) 
([]*models.Task, error) { + request := models.TasksRequest{ + CellId: cellId, + } + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Tasks, response.Error.ToError() +} + +func (c *client) TaskByGuid(logger lager.Logger, traceID string, taskGuid string) (*models.Task, error) { + request := models.TaskByGuidRequest{ + TaskGuid: taskGuid, + } + response := models.TaskResponse{} + err := c.doRequest(logger, traceID, TaskByGuidRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Task, response.Error.ToError() +} + +func (c *client) doTaskLifecycleRequest(logger lager.Logger, traceID string, route string, request proto.Message) error { + response := models.TaskLifecycleResponse{} + err := c.doRequest(logger, traceID, route, nil, nil, request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) DesireTask(logger lager.Logger, traceID string, taskGuid, domain string, taskDef *models.TaskDefinition) error { + route := DesireTaskRoute_r2 + request := models.DesireTaskRequest{ + TaskGuid: taskGuid, + Domain: domain, + TaskDefinition: taskDef, + } + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) StartTask(logger lager.Logger, traceID string, taskGuid string, cellId string) (bool, error) { + request := &models.StartTaskRequest{ + TaskGuid: taskGuid, + CellId: cellId, + } + response := &models.StartTaskResponse{} + err := c.doRequest(logger, traceID, StartTaskRoute_r0, nil, nil, request, response) + if err != nil { + return false, err + } + return response.ShouldStart, response.Error.ToError() +} + +func (c *client) CancelTask(logger lager.Logger, traceID string, taskGuid string) error { + request := models.TaskGuidRequest{ + TaskGuid: taskGuid, + } + route := CancelTaskRoute_r0 + return 
c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) ResolvingTask(logger lager.Logger, traceID string, taskGuid string) error { + request := models.TaskGuidRequest{ + TaskGuid: taskGuid, + } + route := ResolvingTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) DeleteTask(logger lager.Logger, traceID string, taskGuid string) error { + request := models.TaskGuidRequest{ + TaskGuid: taskGuid, + } + route := DeleteTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +// Deprecated: use CancelTask instead +func (c *client) FailTask(logger lager.Logger, traceID string, taskGuid string, failureReason string) error { + request := models.FailTaskRequest{ + TaskGuid: taskGuid, + FailureReason: failureReason, + } + route := FailTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) RejectTask(logger lager.Logger, traceID string, taskGuid string, rejectionReason string) error { + request := models.RejectTaskRequest{ + TaskGuid: taskGuid, + RejectionReason: rejectionReason, + } + route := RejectTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) CompleteTask(logger lager.Logger, traceID string, taskGuid string, cellId string, failed bool, failureReason, result string) error { + request := models.CompleteTaskRequest{ + TaskGuid: taskGuid, + CellId: cellId, + Failed: failed, + FailureReason: failureReason, + Result: result, + } + route := CompleteTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) subscribeToEvents(route string, cellId string) (events.EventSource, error) { + request := models.EventsByCellId{ + CellId: cellId, + } + messageBody, err := proto.Marshal(&request) + if err != nil { + return nil, err + } + + sseConfig := sse.Config{ + Client: c.streamingHTTPClient, + RetryParams: sse.RetryParams{ + 
RetryInterval: c.retryInterval, + MaxRetries: uint16(c.requestRetryCount), + }, + RequestCreator: func() *http.Request { + request, err := c.reqGen.CreateRequest(route, nil, bytes.NewReader(messageBody)) + if err != nil { + panic(err) // totally shouldn't happen + } + + return request + }, + } + + eventSource, err := sseConfig.Connect() + if err != nil { + return nil, err + } + + return events.NewEventSource(eventSource), nil +} + +// Deprecated: use SubscribeToInstanceEvents instead +func (c *client) SubscribeToEvents(logger lager.Logger) (events.EventSource, error) { + return c.subscribeToEvents(LRPGroupEventStreamRoute_r1, "") +} + +func (c *client) SubscribeToInstanceEvents(logger lager.Logger) (events.EventSource, error) { + return c.subscribeToEvents(LRPInstanceEventStreamRoute_r1, "") +} + +func (c *client) SubscribeToTaskEvents(logger lager.Logger) (events.EventSource, error) { + return c.subscribeToEvents(TaskEventStreamRoute_r1, "") +} + +// Deprecated: use SubscribeToInstanceEventsByCellID instead +func (c *client) SubscribeToEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) { + return c.subscribeToEvents(LRPGroupEventStreamRoute_r1, cellId) +} + +func (c *client) SubscribeToInstanceEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) { + return c.subscribeToEvents(LRPInstanceEventStreamRoute_r1, cellId) +} + +func (c *client) Cells(logger lager.Logger, traceID string) ([]*models.CellPresence, error) { + response := models.CellsResponse{} + err := c.doRequest(logger, traceID, CellsRoute_r0, nil, nil, nil, &response) + if err != nil { + return nil, err + } + return response.Cells, response.Error.ToError() +} + +func (c *client) createRequest(traceID string, requestName string, params rata.Params, queryParams url.Values, message proto.Message) (*http.Request, error) { + var messageBody []byte + var err error + if message != nil { + messageBody, err = proto.Marshal(message) + if err != nil { + return 
nil, err + } + } + + request, err := c.reqGen.CreateRequest(requestName, params, bytes.NewReader(messageBody)) + if err != nil { + return nil, err + } + + request.URL.RawQuery = queryParams.Encode() + request.ContentLength = int64(len(messageBody)) + request.Header.Set("Content-Type", ProtoContentType) + request.Header.Set(trace.RequestIdHeader, traceID) + return request, nil +} + +func (c *client) doEvacRequest(logger lager.Logger, traceID string, route string, defaultKeepContainer bool, request proto.Message) (bool, error) { + var response models.EvacuationResponse + err := c.doRequest(logger, traceID, route, nil, nil, request, &response) + if err != nil { + return defaultKeepContainer, err + } + + return response.KeepContainer, response.Error.ToError() +} + +func (c *client) doRequest(logger lager.Logger, traceID string, requestName string, params rata.Params, queryParams url.Values, requestBody, responseBody proto.Message) error { + logger = logger.Session("do-request") + var err error + var request *http.Request + + for attempts := 0; attempts < c.requestRetryCount; attempts++ { + logger.Debug("creating-request", lager.Data{"attempt": attempts + 1, "request_name": requestName}) + request, err = c.createRequest(traceID, requestName, params, queryParams, requestBody) + if err != nil { + logger.Error("failed-creating-request", err) + return err + } + + logger.Debug("doing-request", lager.Data{"attempt": attempts + 1, "request_path": request.URL.Path}) + + start := time.Now().UnixNano() + err = c.do(request, responseBody) + finish := time.Now().UnixNano() + + if err != nil { + logger.Error("failed-doing-request", err) + if netErr, ok := err.(net.Error); ok { + if netErr.Timeout() { + err = models.NewError(models.Error_Timeout, err.Error()) + } + } + time.Sleep(500 * time.Millisecond) + } else { + logger.Debug("complete", lager.Data{"request_path": request.URL.Path, "duration_in_ns": finish - start}) + break + } + } + return err +} + +func (c *client) do(request 
*http.Request, responseObject proto.Message) error { + response, err := c.httpClient.Do(request) + if err != nil { + return err + } + defer func() { + // don't worry about errors when closing the body + _ = response.Body.Close() + }() + + var parsedContentType string + if contentType, ok := response.Header[ContentTypeHeader]; ok { + parsedContentType, _, _ = mime.ParseMediaType(contentType[0]) + } + + if routerError, ok := response.Header[XCfRouterErrorHeader]; ok { + return models.NewError(models.Error_RouterError, routerError[0]) + } + + if parsedContentType == ProtoContentType { + return handleProtoResponse(response, responseObject) + } else { + return handleNonProtoResponse(response) + } +} + +func handleProtoResponse(response *http.Response, responseObject proto.Message) error { + if responseObject == nil { + return models.NewError(models.Error_InvalidRequest, "responseObject cannot be nil") + } + + buf, err := io.ReadAll(response.Body) + if err != nil { + return models.NewError(models.Error_InvalidResponse, fmt.Sprint("failed to read body: ", err.Error())) + } + + err = proto.Unmarshal(buf, responseObject) + if err != nil { + return models.NewError(models.Error_InvalidProtobufMessage, fmt.Sprint("failed to unmarshal proto: ", err.Error())) + } + + return nil +} + +func handleNonProtoResponse(response *http.Response) error { + if response.StatusCode == 404 { + return EndpointNotFoundErr + } + + if response.StatusCode > 299 { + return models.NewError(models.Error_InvalidResponse, fmt.Sprintf(InvalidResponseMessage, response.StatusCode)) + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/crypt.go b/vendor/code.cloudfoundry.org/bbs/encryption/crypt.go new file mode 100644 index 00000000..3d70071c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/crypt.go @@ -0,0 +1,77 @@ +package encryption + +import ( + "crypto/cipher" + "fmt" + "io" +) + +const NonceSize = 12 + +type Encrypted struct { + Nonce []byte + KeyLabel string + 
CipherText []byte +} + +type Encryptor interface { + Encrypt(plaintext []byte) (Encrypted, error) +} + +type Decryptor interface { + Decrypt(encrypted Encrypted) ([]byte, error) +} + +//go:generate counterfeiter -generate + +//counterfeiter:generate . Cryptor + +type Cryptor interface { + Encryptor + Decryptor +} + +type cryptor struct { + keyManager KeyManager + prng io.Reader +} + +func NewCryptor(keyManager KeyManager, prng io.Reader) Cryptor { + return &cryptor{ + keyManager: keyManager, + prng: prng, + } +} + +func (c *cryptor) Encrypt(plaintext []byte) (Encrypted, error) { + key := c.keyManager.EncryptionKey() + + aead, err := cipher.NewGCM(key.Block()) + if err != nil { + return Encrypted{}, fmt.Errorf("Unable to create GCM-wrapped cipher: %q", err) + } + + nonce := make([]byte, aead.NonceSize()) + _, err = io.ReadFull(c.prng, nonce) + if err != nil { + return Encrypted{}, fmt.Errorf("Unable to generate random nonce: %q", err) + } + + ciphertext := aead.Seal(nil, nonce, plaintext, nil) + return Encrypted{KeyLabel: key.Label(), Nonce: nonce, CipherText: ciphertext}, nil +} + +func (d *cryptor) Decrypt(encrypted Encrypted) ([]byte, error) { + key := d.keyManager.DecryptionKey(encrypted.KeyLabel) + if key == nil { + return nil, fmt.Errorf("Key with label %q was not found", encrypted.KeyLabel) + } + + aead, err := cipher.NewGCM(key.Block()) + if err != nil { + return nil, fmt.Errorf("Unable to create GCM-wrapped cipher: %q", err) + } + + // #nosec G407 - G407 is incorrectly flagging Decrypt calls that use the nonce provided in the encrypted data. we randomize this for encryption, which is where it matters. 
https://github.com/securego/gosec/issues/1209 + return aead.Open(nil, encrypted.Nonce, encrypted.CipherText, nil) +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go b/vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go new file mode 100644 index 00000000..8e8c883a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go @@ -0,0 +1,42 @@ +package encryption + +import "errors" + +type EncryptionConfig struct { + ActiveKeyLabel string `json:"active_key_label"` + EncryptionKeys map[string]string `json:"encryption_keys"` +} + +func (ef *EncryptionConfig) Parse() (Key, []Key, error) { + if len(ef.EncryptionKeys) == 0 { + return nil, nil, errors.New("Must have at least one encryption key set") + } + + if len(ef.ActiveKeyLabel) == 0 { + return nil, nil, errors.New("Must select an active encryption key") + } + + var encryptionKey Key + + labelsToKeys := map[string]Key{} + + for label, phrase := range ef.EncryptionKeys { + key, err := NewKey(label, phrase) + if err != nil { + return nil, nil, err + } + labelsToKeys[label] = key + } + + encryptionKey, ok := labelsToKeys[ef.ActiveKeyLabel] + if !ok { + return nil, nil, errors.New("The selected active key must be listed on the encryption keys flag") + } + + keys := []Key{} + for _, v := range labelsToKeys { + keys = append(keys, v) + } + + return encryptionKey, keys, nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/key.go b/vendor/code.cloudfoundry.org/bbs/encryption/key.go new file mode 100644 index 00000000..715a6438 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/key.go @@ -0,0 +1,49 @@ +package encryption + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/sha256" + "errors" +) + +//counterfeiter:generate . 
Key + +type Key interface { + Label() string + Block() cipher.Block +} + +type key struct { + block cipher.Block + label string +} + +func NewKey(label, phrase string) (Key, error) { + if label == "" { + return nil, errors.New("A key label is required") + } + + if len(label) > 127 { + return nil, errors.New("Key label is longer than 127 bytes") + } + + hash := sha256.Sum256([]byte(phrase)) + block, err := aes.NewCipher(hash[:]) + if err != nil { + return nil, err + } + + return &key{ + label: label, + block: block, + }, nil +} + +func (k *key) Label() string { + return k.label +} + +func (k *key) Block() cipher.Block { + return k.block +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go b/vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go new file mode 100644 index 00000000..441cc28c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go @@ -0,0 +1,39 @@ +package encryption + +import "fmt" + +type keyManager struct { + encryptionKey Key + decryptionKeys map[string]Key +} + +type KeyManager interface { + EncryptionKey() Key + DecryptionKey(label string) Key +} + +func NewKeyManager(encryptionKey Key, decryptionKeys []Key) (KeyManager, error) { + decryptionKeyMap := map[string]Key{ + encryptionKey.Label(): encryptionKey, + } + + for _, key := range decryptionKeys { + if existingKey, ok := decryptionKeyMap[key.Label()]; ok && key != existingKey { + return nil, fmt.Errorf("Multiple keys with the same label: %q", key.Label()) + } + decryptionKeyMap[key.Label()] = key + } + + return &keyManager{ + encryptionKey: encryptionKey, + decryptionKeys: decryptionKeyMap, + }, nil +} + +func (m *keyManager) EncryptionKey() Key { + return m.encryptionKey +} + +func (m *keyManager) DecryptionKey(label string) Key { + return m.decryptionKeys[label] +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/package.go b/vendor/code.cloudfoundry.org/bbs/encryption/package.go new file mode 100644 index 00000000..2aaa08df --- 
/dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/package.go @@ -0,0 +1 @@ +package encryption // import "code.cloudfoundry.org/bbs/encryption" diff --git a/vendor/code.cloudfoundry.org/bbs/events/event_source.go b/vendor/code.cloudfoundry.org/bbs/events/event_source.go new file mode 100644 index 00000000..a8271690 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/events/event_source.go @@ -0,0 +1,269 @@ +package events + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "strconv" + + "code.cloudfoundry.org/bbs/models" + "github.com/gogo/protobuf/proto" + "github.com/vito/go-sse/sse" +) + +var ( + ErrUnrecognizedEventType = errors.New("unrecognized event type") + ErrSourceClosed = errors.New("source closed") + ErrNoData = errors.New("event with no data") +) + +type invalidPayloadError struct { + payloadType string + protoErr error +} + +func NewInvalidPayloadError(payloadType string, protoErr error) error { + return invalidPayloadError{payloadType: payloadType, protoErr: protoErr} +} + +func (e invalidPayloadError) Error() string { + return fmt.Sprintf("invalid protobuf payload of type %s: %s", e.payloadType, e.protoErr.Error()) +} + +type rawEventSourceError struct { + rawError error +} + +func NewRawEventSourceError(rawError error) error { + return rawEventSourceError{rawError: rawError} +} + +func (e rawEventSourceError) Error() string { + return fmt.Sprintf("raw event source error: %s", e.rawError.Error()) +} + +type closeError struct { + err error +} + +func NewCloseError(err error) error { + return closeError{err: err} +} + +func (e closeError) Error() string { + return fmt.Sprintf("error closing raw source: %s", e.err.Error()) +} + +func NewEventFromModelEvent(eventID int, event models.Event) (sse.Event, error) { + payload, err := proto.Marshal(event) + if err != nil { + return sse.Event{}, err + } + + encodedPayload := base64.StdEncoding.EncodeToString(payload) + return sse.Event{ + ID: strconv.Itoa(eventID), + Name: 
string(event.EventType()), + Data: []byte(encodedPayload), + }, nil +} + +//go:generate counterfeiter -generate + +//counterfeiter:generate -o eventfakes/fake_event_source.go . EventSource + +// EventSource provides sequential access to a stream of events. +type EventSource interface { + // Next reads the next event from the source. If the connection is lost, it + // automatically reconnects. + // + // If the end of the stream is reached cleanly (which should actually never + // happen), io.EOF is returned. If called after or during Close, + // ErrSourceClosed is returned. + Next() (models.Event, error) + + // Close releases the underlying response, interrupts any in-flight Next, and + // prevents further calls to Next. + Close() error +} + +//counterfeiter:generate -o eventfakes/fake_raw_event_source.go . RawEventSource + +type RawEventSource interface { + Next() (sse.Event, error) + Close() error +} + +type eventSource struct { + rawEventSource RawEventSource +} + +func NewEventSource(raw RawEventSource) EventSource { + return &eventSource{ + rawEventSource: raw, + } +} + +func (e *eventSource) Next() (models.Event, error) { + rawEvent, err := e.rawEventSource.Next() + if err != nil { + switch err { + case io.EOF: + return nil, err + + case sse.ErrSourceClosed: + return nil, ErrSourceClosed + + default: + return nil, NewRawEventSourceError(err) + } + } + + return parseRawEvent(rawEvent) +} + +func (e *eventSource) Close() error { + err := e.rawEventSource.Close() + if err != nil { + return NewCloseError(err) + } + + return nil +} + +func parseRawEvent(rawEvent sse.Event) (models.Event, error) { + data, err := base64.StdEncoding.DecodeString(string(rawEvent.Data)) + if len(data) == 0 { + return nil, NewInvalidPayloadError(rawEvent.Name, ErrNoData) + } else if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + switch rawEvent.Name { + case models.EventTypeDesiredLRPCreated: + event := new(models.DesiredLRPCreatedEvent) + err := 
proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeDesiredLRPChanged: + event := new(models.DesiredLRPChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeDesiredLRPRemoved: + event := new(models.DesiredLRPRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + case models.EventTypeActualLRPCreated: + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + event := new(models.ActualLRPCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + case models.EventTypeActualLRPChanged: + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + event := new(models.ActualLRPChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + case models.EventTypeActualLRPRemoved: + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + event := new(models.ActualLRPRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPCrashed: + event := new(models.ActualLRPCrashedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, 
NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeTaskCreated: + event := new(models.TaskCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeTaskChanged: + event := new(models.TaskChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeTaskRemoved: + event := new(models.TaskRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPInstanceCreated: + event := new(models.ActualLRPInstanceCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPInstanceChanged: + event := new(models.ActualLRPInstanceChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPInstanceRemoved: + event := new(models.ActualLRPInstanceRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + } + + return nil, ErrUnrecognizedEventType +} diff --git a/vendor/code.cloudfoundry.org/bbs/events/hub.go b/vendor/code.cloudfoundry.org/bbs/events/hub.go new file mode 100644 index 00000000..0950825d --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/events/hub.go @@ -0,0 +1,200 @@ +package events + +import ( + "errors" + "sync" + + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/lager/v3" +) + +const MAX_PENDING_SUBSCRIBER_EVENTS = 1024 + +var ErrReadFromClosedSource = errors.New("read from closed source") +var 
ErrSendToClosedSource = errors.New("send to closed source") +var ErrSourceAlreadyClosed = errors.New("source already closed") +var ErrSlowConsumer = errors.New("slow consumer") + +var ErrSubscribedToClosedHub = errors.New("subscribed to closed hub") +var ErrHubAlreadyClosed = errors.New("hub already closed") + +//counterfeiter:generate -o eventfakes/fake_hub.go . Hub +type Hub interface { + Subscribe() (EventSource, error) + Emit(models.Event) + Close() error + + RegisterCallback(func(count int)) + UnregisterCallback() +} + +type hub struct { + subscribers map[*hubSource]struct{} + closed bool + lock sync.Mutex + logger lager.Logger + + cb func(count int) +} + +func NewHub(logger lager.Logger) Hub { + return &hub{ + subscribers: make(map[*hubSource]struct{}), + logger: logger, + } +} + +func (hub *hub) RegisterCallback(cb func(int)) { + hub.lock.Lock() + hub.cb = cb + size := len(hub.subscribers) + hub.lock.Unlock() + if cb != nil { + cb(size) + } +} + +func (hub *hub) UnregisterCallback() { + hub.lock.Lock() + hub.cb = nil + hub.lock.Unlock() +} + +func (hub *hub) Subscribe() (EventSource, error) { + hub.lock.Lock() + + if hub.closed { + hub.lock.Unlock() + + return nil, ErrSubscribedToClosedHub + } + + sub := newSource(MAX_PENDING_SUBSCRIBER_EVENTS, hub.subscriberClosed) + hub.subscribers[sub] = struct{}{} + cb := hub.cb + size := len(hub.subscribers) + hub.lock.Unlock() + + if cb != nil { + cb(size) + } + return sub, nil +} + +func (hub *hub) Emit(event models.Event) { + hub.lock.Lock() + size := len(hub.subscribers) + + for sub := range hub.subscribers { + err := sub.send(event) + if err != nil { + hub.logger.Error("got-error-sending-event", err) + delete(hub.subscribers, sub) + } + } + + var cb func(int) + if len(hub.subscribers) != size { + cb = hub.cb + size = len(hub.subscribers) + } + hub.lock.Unlock() + + if cb != nil { + cb(size) + } +} + +func (hub *hub) Close() error { + hub.lock.Lock() + defer hub.lock.Unlock() + + if hub.closed { + return 
ErrHubAlreadyClosed + } + + hub.closeSubscribers() + hub.closed = true + if hub.cb != nil { + hub.cb(0) + } + return nil +} + +func (hub *hub) closeSubscribers() { + for sub := range hub.subscribers { + _ = sub.Close() + } + hub.subscribers = nil +} + +func (hub *hub) subscriberClosed(source *hubSource) { + hub.lock.Lock() + delete(hub.subscribers, source) + cb := hub.cb + count := len(hub.subscribers) + hub.lock.Unlock() + + if cb != nil { + cb(count) + } +} + +type hubSource struct { + events chan models.Event + closeCallback func(*hubSource) + closed bool + lock sync.Mutex +} + +func newSource(maxPendingEvents int, closeCallback func(*hubSource)) *hubSource { + return &hubSource{ + events: make(chan models.Event, maxPendingEvents), + closeCallback: closeCallback, + } +} + +func (source *hubSource) Next() (models.Event, error) { + event, ok := <-source.events + if !ok { + return nil, ErrReadFromClosedSource + } + return event, nil +} + +func (source *hubSource) Close() error { + source.lock.Lock() + defer source.lock.Unlock() + + if source.closed { + return ErrSourceAlreadyClosed + } + close(source.events) + source.closed = true + go source.closeCallback(source) + return nil +} + +func (source *hubSource) send(event models.Event) error { + source.lock.Lock() + + if source.closed { + source.lock.Unlock() + return ErrSendToClosedSource + } + + select { + case source.events <- event: + source.lock.Unlock() + return nil + + default: + source.lock.Unlock() + err := source.Close() + if err != nil { + return err + } + + return ErrSlowConsumer + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/events/package.go b/vendor/code.cloudfoundry.org/bbs/events/package.go new file mode 100644 index 00000000..b59b892f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/events/package.go @@ -0,0 +1 @@ +package events // import "code.cloudfoundry.org/bbs/events" diff --git a/vendor/code.cloudfoundry.org/bbs/format/encoding.go b/vendor/code.cloudfoundry.org/bbs/format/encoding.go 
new file mode 100644 index 00000000..37776147 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/encoding.go @@ -0,0 +1,102 @@ +package format + +import ( + "encoding/base64" + "fmt" + + "code.cloudfoundry.org/bbs/encryption" +) + +type Encoding [EncodingOffset]byte + +var ( + BASE64_ENCRYPTED Encoding = [2]byte{'0', '2'} +) + +const EncodingOffset int = 2 + +type encoder struct { + cryptor encryption.Cryptor +} + +type Encoder interface { + Encode(payload []byte) ([]byte, error) + Decode(payload []byte) ([]byte, error) +} + +func NewEncoder(cryptor encryption.Cryptor) Encoder { + return &encoder{cryptor: cryptor} +} + +func (e *encoder) Encode(payload []byte) ([]byte, error) { + encrypted, err := e.encrypt(payload) + if err != nil { + return nil, err + } + encoded := encodeBase64(encrypted) + return append(BASE64_ENCRYPTED[:], encoded...), nil +} + +func (e *encoder) Decode(payload []byte) ([]byte, error) { + encoding := encodingFromPayload(payload) + switch encoding { + case BASE64_ENCRYPTED: + encrypted, err := decodeBase64(payload[EncodingOffset:]) + if err != nil { + return nil, err + } + return e.decrypt(encrypted) + default: + return nil, fmt.Errorf("Unknown encoding: %v", encoding) + } +} + +func (e *encoder) encrypt(cleartext []byte) ([]byte, error) { + encrypted, err := e.cryptor.Encrypt(cleartext) + if err != nil { + return nil, err + } + + payload := []byte{} + payload = append(payload, byte(len(encrypted.KeyLabel))) + payload = append(payload, []byte(encrypted.KeyLabel)...) + payload = append(payload, encrypted.Nonce...) + payload = append(payload, encrypted.CipherText...) 
+ + return payload, nil +} + +func (e *encoder) decrypt(encryptedData []byte) ([]byte, error) { + labelLength := encryptedData[0] + encryptedData = encryptedData[1:] + + label := string(encryptedData[:labelLength]) + encryptedData = encryptedData[labelLength:] + + nonce := encryptedData[:encryption.NonceSize] + ciphertext := encryptedData[encryption.NonceSize:] + + return e.cryptor.Decrypt(encryption.Encrypted{ + KeyLabel: label, + Nonce: nonce, + CipherText: ciphertext, + }) +} + +func encodeBase64(unencodedPayload []byte) []byte { + encodedLen := base64.StdEncoding.EncodedLen(len(unencodedPayload)) + encodedPayload := make([]byte, encodedLen) + base64.StdEncoding.Encode(encodedPayload, unencodedPayload) + return encodedPayload +} + +func decodeBase64(encodedPayload []byte) ([]byte, error) { + decodedLen := base64.StdEncoding.DecodedLen(len(encodedPayload)) + decodedPayload := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(decodedPayload, encodedPayload) + return decodedPayload[:n], err +} + +func encodingFromPayload(payload []byte) Encoding { + return Encoding{payload[0], payload[1]} +} diff --git a/vendor/code.cloudfoundry.org/bbs/format/envelope.go b/vendor/code.cloudfoundry.org/bbs/format/envelope.go new file mode 100644 index 00000000..dcd306d0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/envelope.go @@ -0,0 +1,61 @@ +package format + +import ( + "code.cloudfoundry.org/lager/v3" + "github.com/gogo/protobuf/proto" +) + +type EnvelopeFormat byte + +const ( + PROTO EnvelopeFormat = 2 +) + +const EnvelopeOffset int = 2 + +func UnmarshalEnvelope(logger lager.Logger, unencodedPayload []byte, model Model) error { + return UnmarshalProto(logger, unencodedPayload[EnvelopeOffset:], model) +} + +// dummy version for backward compatability. old BBS used to serialize proto +// messages with a 2-byte header that has the envelope format (i.e. PROTO) and +// the version of the model (e.g. 0, 1 or 2). 
Adding the version was a +// pre-mature optimization that we decided to get rid of in #133215113. That +// said, we have the ensure the header is a 2-byte to avoid breaking older BBS +// Deprecated: do not use, see note above +const version = 0 + +func MarshalEnvelope(model Model) ([]byte, error) { + var payload []byte + var err error + + payload, err = MarshalProto(model) + + if err != nil { + return nil, err + } + + data := make([]byte, 0, len(payload)+EnvelopeOffset) + data = append(data, byte(PROTO), byte(version)) + data = append(data, payload...) + + return data, nil +} + +func UnmarshalProto(logger lager.Logger, marshaledPayload []byte, model Model) error { + err := proto.Unmarshal(marshaledPayload, model) + if err != nil { + logger.Error("failed-to-proto-unmarshal-payload", err) + return err + } + return nil +} + +func MarshalProto(v Model) ([]byte, error) { + bytes, err := proto.Marshal(v) + if err != nil { + return nil, err + } + + return bytes, nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/format/format.go b/vendor/code.cloudfoundry.org/bbs/format/format.go new file mode 100644 index 00000000..b1f384ff --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/format.go @@ -0,0 +1,38 @@ +package format + +import ( + "code.cloudfoundry.org/bbs/encryption" + "code.cloudfoundry.org/lager/v3" +) + +type serializer struct { + encoder Encoder +} + +type Serializer interface { + Marshal(logger lager.Logger, model Model) ([]byte, error) + Unmarshal(logger lager.Logger, encodedPayload []byte, model Model) error +} + +func NewSerializer(cryptor encryption.Cryptor) Serializer { + return &serializer{ + encoder: NewEncoder(cryptor), + } +} + +func (s *serializer) Marshal(logger lager.Logger, model Model) ([]byte, error) { + envelopedPayload, err := MarshalEnvelope(model) + if err != nil { + return nil, err + } + + return s.encoder.Encode(envelopedPayload) +} + +func (s *serializer) Unmarshal(logger lager.Logger, encodedPayload []byte, model Model) error { + 
unencodedPayload, err := s.encoder.Decode(encodedPayload) + if err != nil { + return err + } + return UnmarshalEnvelope(logger, unencodedPayload, model) +} diff --git a/vendor/code.cloudfoundry.org/bbs/format/package.go b/vendor/code.cloudfoundry.org/bbs/format/package.go new file mode 100644 index 00000000..d9000165 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/package.go @@ -0,0 +1 @@ +package format // import "code.cloudfoundry.org/bbs/format" diff --git a/vendor/code.cloudfoundry.org/bbs/format/versioner.go b/vendor/code.cloudfoundry.org/bbs/format/versioner.go new file mode 100644 index 00000000..e4c3feb2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/versioner.go @@ -0,0 +1,16 @@ +package format + +import "github.com/gogo/protobuf/proto" + +type Version byte + +const ( + V0 Version = 0 + V1 Version = 1 + V2 Version = 2 + V3 Version = 3 +) + +type Model interface { + proto.Message +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actions.go b/vendor/code.cloudfoundry.org/bbs/models/actions.go new file mode 100644 index 00000000..1c2a13c8 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actions.go @@ -0,0 +1,592 @@ +package models + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "code.cloudfoundry.org/bbs/format" + proto "github.com/gogo/protobuf/proto" +) + +const ( + ActionTypeDownload = "download" + ActionTypeEmitProgress = "emit_progress" + ActionTypeRun = "run" + ActionTypeUpload = "upload" + ActionTypeTimeout = "timeout" + ActionTypeTry = "try" + ActionTypeParallel = "parallel" + ActionTypeSerial = "serial" + ActionTypeCodependent = "codependent" +) + +var ErrInvalidActionType = errors.New("invalid action type") + +type ActionInterface interface { + ActionType() string + Validate() error + proto.Message +} + +func (a *Action) GetValue() interface{} { + if a.DownloadAction != nil { + return a.DownloadAction + } + if a.UploadAction != nil { + return a.UploadAction + } + if a.RunAction != 
nil { + return a.RunAction + } + if a.TimeoutAction != nil { + return a.TimeoutAction + } + if a.EmitProgressAction != nil { + return a.EmitProgressAction + } + if a.TryAction != nil { + return a.TryAction + } + if a.ParallelAction != nil { + return a.ParallelAction + } + if a.SerialAction != nil { + return a.SerialAction + } + if a.CodependentAction != nil { + return a.CodependentAction + } + return nil +} + +func (a *Action) SetValue(value interface{}) bool { + switch vt := value.(type) { + case *DownloadAction: + a.DownloadAction = vt + case *UploadAction: + a.UploadAction = vt + case *RunAction: + a.RunAction = vt + case *TimeoutAction: + a.TimeoutAction = vt + case *EmitProgressAction: + a.EmitProgressAction = vt + case *TryAction: + a.TryAction = vt + case *ParallelAction: + a.ParallelAction = vt + case *SerialAction: + a.SerialAction = vt + case *CodependentAction: + a.CodependentAction = vt + default: + return false + } + return true +} + +func (a *Action) Validate() error { + if a == nil { + return nil + } + + if inner := UnwrapAction(a); inner != nil { + err := inner.Validate() + if err != nil { + return err + } + } else { + return ErrInvalidField{"inner-action"} + } + return nil +} + +func (a *DownloadAction) ActionType() string { + return ActionTypeDownload +} + +func (a DownloadAction) Validate() error { + var validationError ValidationError + + if a.GetFrom() == "" { + validationError = validationError.Append(ErrInvalidField{"from"}) + } + + if a.GetTo() == "" { + validationError = validationError.Append(ErrInvalidField{"to"}) + } + + if a.GetUser() == "" { + validationError = validationError.Append(ErrInvalidField{"user"}) + } + + if a.GetChecksumValue() != "" && a.GetChecksumAlgorithm() == "" { + validationError = validationError.Append(ErrInvalidField{"checksum algorithm"}) + } + + if a.GetChecksumValue() == "" && a.GetChecksumAlgorithm() != "" { + validationError = validationError.Append(ErrInvalidField{"checksum value"}) + } + + if 
a.GetChecksumValue() != "" && a.GetChecksumAlgorithm() != "" { + if !contains([]string{"md5", "sha1", "sha256"}, strings.ToLower(a.GetChecksumAlgorithm())) { + validationError = validationError.Append(ErrInvalidField{"invalid algorithm"}) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func contains(array []string, element string) bool { + for _, item := range array { + if item == element { + return true + } + } + return false +} + +func (a *UploadAction) ActionType() string { + return ActionTypeUpload +} + +func (a UploadAction) Validate() error { + var validationError ValidationError + + if a.GetTo() == "" { + validationError = validationError.Append(ErrInvalidField{"to"}) + } + + if a.GetFrom() == "" { + validationError = validationError.Append(ErrInvalidField{"from"}) + } + + if a.GetUser() == "" { + validationError = validationError.Append(ErrInvalidField{"user"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *RunAction) ActionType() string { + return ActionTypeRun +} + +func (a RunAction) Validate() error { + var validationError ValidationError + + if a.Path == "" { + validationError = validationError.Append(ErrInvalidField{"path"}) + } + + if a.User == "" { + validationError = validationError.Append(ErrInvalidField{"user"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *TimeoutAction) ActionType() string { + return ActionTypeTimeout +} + +func (a TimeoutAction) Validate() error { + var validationError ValidationError + + if a.Action == nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + } else { + err := UnwrapAction(a.Action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if a.GetTimeoutMs() <= 0 { + validationError = validationError.Append(ErrInvalidField{"timeout_ms"}) + } + + if !validationError.Empty() { + return validationError + } + + return 
nil +} + +func (a *TryAction) ActionType() string { + return ActionTypeTry +} + +func (a TryAction) Validate() error { + var validationError ValidationError + + if a.Action == nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + } else { + err := UnwrapAction(a.Action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (*ParallelAction) Version() format.Version { + return format.V0 +} + +func (a *ParallelAction) ActionType() string { + return ActionTypeParallel +} + +func (a ParallelAction) Validate() error { + var validationError ValidationError + + if len(a.Actions) == 0 { + validationError = validationError.Append(ErrInvalidField{"actions"}) + } else { + for index, action := range a.Actions { + if action == nil { + errorString := fmt.Sprintf("action at index %d", index) + validationError = validationError.Append(ErrInvalidField{errorString}) + } else { + err := UnwrapAction(action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *CodependentAction) ActionType() string { + return ActionTypeCodependent +} + +func (a CodependentAction) Validate() error { + var validationError ValidationError + + if len(a.Actions) == 0 { + validationError = validationError.Append(ErrInvalidField{"actions"}) + } else { + for index, action := range a.Actions { + if action == nil { + errorString := fmt.Sprintf("action at index %d", index) + validationError = validationError.Append(ErrInvalidField{errorString}) + } else { + err := UnwrapAction(action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +// func (*SerialAction) Version() format.Version { +// return format.V0 +// } + +// 
func (*SerialAction) MigrateFromVersion(v format.Version) error { +// return nil +// } + +func (a *SerialAction) ActionType() string { + return ActionTypeSerial +} + +func (a SerialAction) Validate() error { + var validationError ValidationError + + if len(a.Actions) == 0 { + validationError = validationError.Append(ErrInvalidField{"actions"}) + } else { + for index, action := range a.Actions { + if action == nil { + errorString := fmt.Sprintf("action at index %d", index) + validationError = validationError.Append(ErrInvalidField{errorString}) + } else { + err := UnwrapAction(action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *EmitProgressAction) ActionType() string { + return ActionTypeEmitProgress +} + +func (a EmitProgressAction) Validate() error { + var validationError ValidationError + + if a.Action == nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + } else { + err := UnwrapAction(a.Action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func EmitProgressFor(action ActionInterface, startMessage string, successMessage string, failureMessagePrefix string) *EmitProgressAction { + return &EmitProgressAction{ + Action: WrapAction(action), + StartMessage: startMessage, + SuccessMessage: successMessage, + FailureMessagePrefix: failureMessagePrefix, + } +} + +func Timeout(action ActionInterface, timeout time.Duration) *TimeoutAction { + return &TimeoutAction{ + Action: WrapAction(action), + TimeoutMs: (int64)(timeout / 1000000), + } +} + +func Try(action ActionInterface) *TryAction { + return &TryAction{Action: WrapAction(action)} +} + +func Parallel(actions ...ActionInterface) *ParallelAction { + return &ParallelAction{Actions: WrapActions(actions)} +} + +func Codependent(actions 
...ActionInterface) *CodependentAction { + return &CodependentAction{Actions: WrapActions(actions)} +} + +func Serial(actions ...ActionInterface) *SerialAction { + return &SerialAction{Actions: WrapActions(actions)} +} + +func UnwrapAction(action *Action) ActionInterface { + if action == nil { + return nil + } + a := action.GetValue() + if a == nil { + return nil + } + return a.(ActionInterface) +} + +func WrapActions(actions []ActionInterface) []*Action { + wrappedActions := make([]*Action, 0, len(actions)) + for _, action := range actions { + wrappedActions = append(wrappedActions, WrapAction(action)) + } + return wrappedActions +} + +func WrapAction(action ActionInterface) *Action { + if action == nil { + return nil + } + a := &Action{} + a.SetValue(action) + return a +} + +// SetDeprecatedTimeoutNs returns a deep copy of the Action tree. If there are +// any TimeoutActions in the tree, their DeprecatedStartTimeoutS is set to +// `TimeoutMs * time.Millisecond'. +func (action *Action) SetDeprecatedTimeoutNs() *Action { + if action == nil { + return nil + } + + a := action.GetValue() + switch actionModel := a.(type) { + case *RunAction, *DownloadAction, *UploadAction: + return action + + case *TimeoutAction: + timeoutAction := *actionModel + timeoutAction.DeprecatedTimeoutNs = timeoutAction.TimeoutMs * int64(time.Millisecond) + return WrapAction(&timeoutAction) + + case *EmitProgressAction: + return actionModel.Action.SetDeprecatedTimeoutNs() + + case *TryAction: + return actionModel.Action.SetDeprecatedTimeoutNs() + + case *ParallelAction: + newActions := []*Action{} + for _, subaction := range actionModel.Actions { + newActions = append(newActions, subaction.SetDeprecatedTimeoutNs()) + } + parallelAction := *actionModel + parallelAction.Actions = newActions + return WrapAction(¶llelAction) + + case *SerialAction: + newActions := []*Action{} + for _, subaction := range actionModel.Actions { + newActions = append(newActions, subaction.SetDeprecatedTimeoutNs()) + } 
+ serialAction := *actionModel + serialAction.Actions = newActions + return WrapAction(&serialAction) + + case *CodependentAction: + newActions := []*Action{} + for _, subaction := range actionModel.Actions { + newActions = append(newActions, subaction.SetDeprecatedTimeoutNs()) + } + codependentAction := *actionModel + codependentAction.Actions = newActions + return WrapAction(&codependentAction) + } + + return action +} + +func (action *Action) SetTimeoutMsFromDeprecatedTimeoutNs() { + if action == nil { + return + } + + a := action.GetValue() + switch actionModel := a.(type) { + case *RunAction, *DownloadAction, *UploadAction: + return + + case *TimeoutAction: + timeoutAction := actionModel + timeoutAction.TimeoutMs = timeoutAction.DeprecatedTimeoutNs / int64(time.Millisecond) + + case *EmitProgressAction: + actionModel.Action.SetDeprecatedTimeoutNs() + + case *TryAction: + actionModel.Action.SetDeprecatedTimeoutNs() + + case *ParallelAction: + for _, subaction := range actionModel.Actions { + subaction.SetDeprecatedTimeoutNs() + } + + case *SerialAction: + for _, subaction := range actionModel.Actions { + subaction.SetDeprecatedTimeoutNs() + } + + case *CodependentAction: + for _, subaction := range actionModel.Actions { + subaction.SetDeprecatedTimeoutNs() + } + } +} + +type internalResourceLimits struct { + Nofile *uint64 `json:"nofile,omitempty"` + Nproc *uint64 `json:"nproc,omitempty"` +} + +func (l *ResourceLimits) UnmarshalJSON(data []byte) error { + var limit internalResourceLimits + if err := json.Unmarshal(data, &limit); err != nil { + return err + } + + if limit.Nofile != nil { + l.SetNofile(*limit.Nofile) + } + if limit.Nproc != nil { + l.SetNproc(*limit.Nproc) + } + + return nil +} + +func (l ResourceLimits) MarshalJSON() ([]byte, error) { + var limit internalResourceLimits + if l.NofileExists() { + n := l.GetNofile() + limit.Nofile = &n + } + if l.NprocExists() { + n := l.GetNproc() + limit.Nproc = &n + } + return json.Marshal(limit) +} + +func (l 
*ResourceLimits) SetNofile(nofile uint64) { + l.OptionalNofile = &ResourceLimits_Nofile{ + Nofile: nofile, + } +} + +func (m *ResourceLimits) GetNofilePtr() *uint64 { + if x, ok := m.GetOptionalNofile().(*ResourceLimits_Nofile); ok { + return &x.Nofile + } + return nil +} + +func (l *ResourceLimits) NofileExists() bool { + _, ok := l.GetOptionalNofile().(*ResourceLimits_Nofile) + return ok +} + +func (l *ResourceLimits) SetNproc(nproc uint64) { + l.OptionalNproc = &ResourceLimits_Nproc{ + Nproc: nproc, + } +} + +func (l *ResourceLimits) NprocExists() bool { + _, ok := l.GetOptionalNproc().(*ResourceLimits_Nproc) + return ok +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actions.pb.go b/vendor/code.cloudfoundry.org/bbs/models/actions.pb.go new file mode 100644 index 00000000..21ae6c5a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actions.pb.go @@ -0,0 +1,5157 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: actions.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Action struct { + // Note: we only expect one of the following set of fields to be + // set. Previously we used `option (gogoproto.onlyone) = true' but since this + // is now deprecated and oneof introduces a lot of structural changes, we + // deferred on switching to oneof for now until there is a good reason for it. 
+ // disadvantages of using multiple optionals as opposed to oneof are: + // - less memory usage + // disadvantages of using multiple optionals without onlyone: + // - writing our own GetAction/SetAction methods + // action oneof { + DownloadAction *DownloadAction `protobuf:"bytes,1,opt,name=download_action,json=downloadAction,proto3" json:"download,omitempty"` + UploadAction *UploadAction `protobuf:"bytes,2,opt,name=upload_action,json=uploadAction,proto3" json:"upload,omitempty"` + RunAction *RunAction `protobuf:"bytes,3,opt,name=run_action,json=runAction,proto3" json:"run,omitempty"` + TimeoutAction *TimeoutAction `protobuf:"bytes,4,opt,name=timeout_action,json=timeoutAction,proto3" json:"timeout,omitempty"` + EmitProgressAction *EmitProgressAction `protobuf:"bytes,5,opt,name=emit_progress_action,json=emitProgressAction,proto3" json:"emit_progress,omitempty"` + TryAction *TryAction `protobuf:"bytes,6,opt,name=try_action,json=tryAction,proto3" json:"try,omitempty"` + ParallelAction *ParallelAction `protobuf:"bytes,7,opt,name=parallel_action,json=parallelAction,proto3" json:"parallel,omitempty"` + SerialAction *SerialAction `protobuf:"bytes,8,opt,name=serial_action,json=serialAction,proto3" json:"serial,omitempty"` + CodependentAction *CodependentAction `protobuf:"bytes,9,opt,name=codependent_action,json=codependentAction,proto3" json:"codependent,omitempty"` +} + +func (m *Action) Reset() { *m = Action{} } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Action.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Action) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Action.Merge(m, src) +} +func (m *Action) XXX_Size() int { + return m.Size() +} +func (m *Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +func (m *Action) GetDownloadAction() *DownloadAction { + if m != nil { + return m.DownloadAction + } + return nil +} + +func (m *Action) GetUploadAction() *UploadAction { + if m != nil { + return m.UploadAction + } + return nil +} + +func (m *Action) GetRunAction() *RunAction { + if m != nil { + return m.RunAction + } + return nil +} + +func (m *Action) GetTimeoutAction() *TimeoutAction { + if m != nil { + return m.TimeoutAction + } + return nil +} + +func (m *Action) GetEmitProgressAction() *EmitProgressAction { + if m != nil { + return m.EmitProgressAction + } + return nil +} + +func (m *Action) GetTryAction() *TryAction { + if m != nil { + return m.TryAction + } + return nil +} + +func (m *Action) GetParallelAction() *ParallelAction { + if m != nil { + return m.ParallelAction + } + return nil +} + +func (m *Action) GetSerialAction() *SerialAction { + if m != nil { + return m.SerialAction + } + return nil +} + +func (m *Action) GetCodependentAction() *CodependentAction { + if m != nil { + return m.CodependentAction + } + return nil +} + +type DownloadAction struct { + Artifact string `protobuf:"bytes,1,opt,name=artifact,proto3" json:"artifact,omitempty"` + From string `protobuf:"bytes,2,opt,name=from,proto3" json:"from"` + To string `protobuf:"bytes,3,opt,name=to,proto3" json:"to"` + CacheKey string `protobuf:"bytes,4,opt,name=cache_key,json=cacheKey,proto3" json:"cache_key"` + LogSource string `protobuf:"bytes,5,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user"` + ChecksumAlgorithm string `protobuf:"bytes,7,opt,name=checksum_algorithm,json=checksumAlgorithm,proto3" json:"checksum_algorithm,omitempty"` + ChecksumValue string 
`protobuf:"bytes,8,opt,name=checksum_value,json=checksumValue,proto3" json:"checksum_value,omitempty"` +} + +func (m *DownloadAction) Reset() { *m = DownloadAction{} } +func (*DownloadAction) ProtoMessage() {} +func (*DownloadAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{1} +} +func (m *DownloadAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownloadAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DownloadAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DownloadAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownloadAction.Merge(m, src) +} +func (m *DownloadAction) XXX_Size() int { + return m.Size() +} +func (m *DownloadAction) XXX_DiscardUnknown() { + xxx_messageInfo_DownloadAction.DiscardUnknown(m) +} + +var xxx_messageInfo_DownloadAction proto.InternalMessageInfo + +func (m *DownloadAction) GetArtifact() string { + if m != nil { + return m.Artifact + } + return "" +} + +func (m *DownloadAction) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *DownloadAction) GetTo() string { + if m != nil { + return m.To + } + return "" +} + +func (m *DownloadAction) GetCacheKey() string { + if m != nil { + return m.CacheKey + } + return "" +} + +func (m *DownloadAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *DownloadAction) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *DownloadAction) GetChecksumAlgorithm() string { + if m != nil { + return m.ChecksumAlgorithm + } + return "" +} + +func (m *DownloadAction) GetChecksumValue() string { + if m != nil { + return m.ChecksumValue + } + return "" +} + +type UploadAction struct { + Artifact string `protobuf:"bytes,1,opt,name=artifact,proto3" 
json:"artifact,omitempty"` + From string `protobuf:"bytes,2,opt,name=from,proto3" json:"from"` + To string `protobuf:"bytes,3,opt,name=to,proto3" json:"to"` + LogSource string `protobuf:"bytes,4,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + User string `protobuf:"bytes,5,opt,name=user,proto3" json:"user"` +} + +func (m *UploadAction) Reset() { *m = UploadAction{} } +func (*UploadAction) ProtoMessage() {} +func (*UploadAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{2} +} +func (m *UploadAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UploadAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UploadAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UploadAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_UploadAction.Merge(m, src) +} +func (m *UploadAction) XXX_Size() int { + return m.Size() +} +func (m *UploadAction) XXX_DiscardUnknown() { + xxx_messageInfo_UploadAction.DiscardUnknown(m) +} + +var xxx_messageInfo_UploadAction proto.InternalMessageInfo + +func (m *UploadAction) GetArtifact() string { + if m != nil { + return m.Artifact + } + return "" +} + +func (m *UploadAction) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *UploadAction) GetTo() string { + if m != nil { + return m.To + } + return "" +} + +func (m *UploadAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *UploadAction) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +type RunAction struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path"` + Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=dir,proto3" 
json:"dir,omitempty"` + Env []*EnvironmentVariable `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty"` + ResourceLimits *ResourceLimits `protobuf:"bytes,5,opt,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"` + User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user"` + LogSource string `protobuf:"bytes,7,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + SuppressLogOutput bool `protobuf:"varint,8,opt,name=suppress_log_output,json=suppressLogOutput,proto3" json:"suppress_log_output"` + VolumeMountedFiles []*File `protobuf:"bytes,9,rep,name=volume_mounted_files,json=volumeMountedFiles,proto3" json:"volume_mounted_files"` +} + +func (m *RunAction) Reset() { *m = RunAction{} } +func (*RunAction) ProtoMessage() {} +func (*RunAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{3} +} +func (m *RunAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RunAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RunAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAction.Merge(m, src) +} +func (m *RunAction) XXX_Size() int { + return m.Size() +} +func (m *RunAction) XXX_DiscardUnknown() { + xxx_messageInfo_RunAction.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAction proto.InternalMessageInfo + +func (m *RunAction) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *RunAction) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *RunAction) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func (m *RunAction) GetEnv() []*EnvironmentVariable { + if m != nil { + return m.Env + } + return nil +} + +func (m *RunAction) 
GetResourceLimits() *ResourceLimits { + if m != nil { + return m.ResourceLimits + } + return nil +} + +func (m *RunAction) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *RunAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *RunAction) GetSuppressLogOutput() bool { + if m != nil { + return m.SuppressLogOutput + } + return false +} + +func (m *RunAction) GetVolumeMountedFiles() []*File { + if m != nil { + return m.VolumeMountedFiles + } + return nil +} + +type TimeoutAction struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + DeprecatedTimeoutNs int64 `protobuf:"varint,2,opt,name=deprecated_timeout_ns,json=deprecatedTimeoutNs,proto3" json:"timeout,omitempty"` // Deprecated: Do not use. + LogSource string `protobuf:"bytes,3,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + TimeoutMs int64 `protobuf:"varint,4,opt,name=timeout_ms,json=timeoutMs,proto3" json:"timeout_ms"` +} + +func (m *TimeoutAction) Reset() { *m = TimeoutAction{} } +func (*TimeoutAction) ProtoMessage() {} +func (*TimeoutAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{4} +} +func (m *TimeoutAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeoutAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeoutAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeoutAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeoutAction.Merge(m, src) +} +func (m *TimeoutAction) XXX_Size() int { + return m.Size() +} +func (m *TimeoutAction) XXX_DiscardUnknown() { + xxx_messageInfo_TimeoutAction.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeoutAction proto.InternalMessageInfo + +func (m *TimeoutAction) 
GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +// Deprecated: Do not use. +func (m *TimeoutAction) GetDeprecatedTimeoutNs() int64 { + if m != nil { + return m.DeprecatedTimeoutNs + } + return 0 +} + +func (m *TimeoutAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *TimeoutAction) GetTimeoutMs() int64 { + if m != nil { + return m.TimeoutMs + } + return 0 +} + +type EmitProgressAction struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + StartMessage string `protobuf:"bytes,2,opt,name=start_message,json=startMessage,proto3" json:"start_message"` + SuccessMessage string `protobuf:"bytes,3,opt,name=success_message,json=successMessage,proto3" json:"success_message"` + FailureMessagePrefix string `protobuf:"bytes,4,opt,name=failure_message_prefix,json=failureMessagePrefix,proto3" json:"failure_message_prefix"` + LogSource string `protobuf:"bytes,5,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *EmitProgressAction) Reset() { *m = EmitProgressAction{} } +func (*EmitProgressAction) ProtoMessage() {} +func (*EmitProgressAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{5} +} +func (m *EmitProgressAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmitProgressAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EmitProgressAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EmitProgressAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmitProgressAction.Merge(m, src) +} +func (m *EmitProgressAction) XXX_Size() int { + return m.Size() +} +func (m *EmitProgressAction) XXX_DiscardUnknown() { + xxx_messageInfo_EmitProgressAction.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EmitProgressAction proto.InternalMessageInfo + +func (m *EmitProgressAction) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *EmitProgressAction) GetStartMessage() string { + if m != nil { + return m.StartMessage + } + return "" +} + +func (m *EmitProgressAction) GetSuccessMessage() string { + if m != nil { + return m.SuccessMessage + } + return "" +} + +func (m *EmitProgressAction) GetFailureMessagePrefix() string { + if m != nil { + return m.FailureMessagePrefix + } + return "" +} + +func (m *EmitProgressAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type TryAction struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *TryAction) Reset() { *m = TryAction{} } +func (*TryAction) ProtoMessage() {} +func (*TryAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{6} +} +func (m *TryAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TryAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TryAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TryAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TryAction.Merge(m, src) +} +func (m *TryAction) XXX_Size() int { + return m.Size() +} +func (m *TryAction) XXX_DiscardUnknown() { + xxx_messageInfo_TryAction.DiscardUnknown(m) +} + +var xxx_messageInfo_TryAction proto.InternalMessageInfo + +func (m *TryAction) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *TryAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type ParallelAction struct { + 
Actions []*Action `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *ParallelAction) Reset() { *m = ParallelAction{} } +func (*ParallelAction) ProtoMessage() {} +func (*ParallelAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{7} +} +func (m *ParallelAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParallelAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ParallelAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ParallelAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParallelAction.Merge(m, src) +} +func (m *ParallelAction) XXX_Size() int { + return m.Size() +} +func (m *ParallelAction) XXX_DiscardUnknown() { + xxx_messageInfo_ParallelAction.DiscardUnknown(m) +} + +var xxx_messageInfo_ParallelAction proto.InternalMessageInfo + +func (m *ParallelAction) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *ParallelAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type SerialAction struct { + Actions []*Action `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *SerialAction) Reset() { *m = SerialAction{} } +func (*SerialAction) ProtoMessage() {} +func (*SerialAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{8} +} +func (m *SerialAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerialAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_SerialAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SerialAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerialAction.Merge(m, src) +} +func (m *SerialAction) XXX_Size() int { + return m.Size() +} +func (m *SerialAction) XXX_DiscardUnknown() { + xxx_messageInfo_SerialAction.DiscardUnknown(m) +} + +var xxx_messageInfo_SerialAction proto.InternalMessageInfo + +func (m *SerialAction) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *SerialAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type CodependentAction struct { + Actions []*Action `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *CodependentAction) Reset() { *m = CodependentAction{} } +func (*CodependentAction) ProtoMessage() {} +func (*CodependentAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{9} +} +func (m *CodependentAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CodependentAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CodependentAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CodependentAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodependentAction.Merge(m, src) +} +func (m *CodependentAction) XXX_Size() int { + return m.Size() +} +func (m *CodependentAction) XXX_DiscardUnknown() { + xxx_messageInfo_CodependentAction.DiscardUnknown(m) +} + +var xxx_messageInfo_CodependentAction proto.InternalMessageInfo + +func (m *CodependentAction) GetActions() 
[]*Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *CodependentAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type ResourceLimits struct { + // Types that are valid to be assigned to OptionalNofile: + // + // *ResourceLimits_Nofile + OptionalNofile isResourceLimits_OptionalNofile `protobuf_oneof:"optional_nofile"` + // Types that are valid to be assigned to OptionalNproc: + // + // *ResourceLimits_Nproc + OptionalNproc isResourceLimits_OptionalNproc `protobuf_oneof:"optional_nproc"` +} + +func (m *ResourceLimits) Reset() { *m = ResourceLimits{} } +func (*ResourceLimits) ProtoMessage() {} +func (*ResourceLimits) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{10} +} +func (m *ResourceLimits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceLimits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceLimits.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceLimits) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceLimits.Merge(m, src) +} +func (m *ResourceLimits) XXX_Size() int { + return m.Size() +} +func (m *ResourceLimits) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceLimits.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceLimits proto.InternalMessageInfo + +type isResourceLimits_OptionalNofile interface { + isResourceLimits_OptionalNofile() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} +type isResourceLimits_OptionalNproc interface { + isResourceLimits_OptionalNproc() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ResourceLimits_Nofile struct { + Nofile uint64 `protobuf:"varint,1,opt,name=nofile,proto3,oneof" json:"nofile,omitempty"` +} +type ResourceLimits_Nproc struct { 
+ Nproc uint64 `protobuf:"varint,2,opt,name=nproc,proto3,oneof" json:"nproc,omitempty"` +} + +func (*ResourceLimits_Nofile) isResourceLimits_OptionalNofile() {} +func (*ResourceLimits_Nproc) isResourceLimits_OptionalNproc() {} + +func (m *ResourceLimits) GetOptionalNofile() isResourceLimits_OptionalNofile { + if m != nil { + return m.OptionalNofile + } + return nil +} +func (m *ResourceLimits) GetOptionalNproc() isResourceLimits_OptionalNproc { + if m != nil { + return m.OptionalNproc + } + return nil +} + +func (m *ResourceLimits) GetNofile() uint64 { + if x, ok := m.GetOptionalNofile().(*ResourceLimits_Nofile); ok { + return x.Nofile + } + return 0 +} + +// Deprecated: Do not use. +func (m *ResourceLimits) GetNproc() uint64 { + if x, ok := m.GetOptionalNproc().(*ResourceLimits_Nproc); ok { + return x.Nproc + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ResourceLimits) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ResourceLimits_Nofile)(nil), + (*ResourceLimits_Nproc)(nil), + } +} + +func init() { + proto.RegisterType((*Action)(nil), "models.Action") + proto.RegisterType((*DownloadAction)(nil), "models.DownloadAction") + proto.RegisterType((*UploadAction)(nil), "models.UploadAction") + proto.RegisterType((*RunAction)(nil), "models.RunAction") + proto.RegisterType((*TimeoutAction)(nil), "models.TimeoutAction") + proto.RegisterType((*EmitProgressAction)(nil), "models.EmitProgressAction") + proto.RegisterType((*TryAction)(nil), "models.TryAction") + proto.RegisterType((*ParallelAction)(nil), "models.ParallelAction") + proto.RegisterType((*SerialAction)(nil), "models.SerialAction") + proto.RegisterType((*CodependentAction)(nil), "models.CodependentAction") + proto.RegisterType((*ResourceLimits)(nil), "models.ResourceLimits") +} + +func init() { proto.RegisterFile("actions.proto", fileDescriptor_eeb49063df94c2b8) } + +var fileDescriptor_eeb49063df94c2b8 = []byte{ + // 1119 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0xdb, 0x36, + 0x14, 0xb7, 0x6c, 0xd7, 0x8d, 0x5e, 0x63, 0xa7, 0x66, 0xfe, 0xd4, 0x75, 0x36, 0x29, 0x30, 0xb0, + 0x21, 0x18, 0x96, 0x14, 0xe8, 0x86, 0x9d, 0x06, 0x0c, 0x75, 0xf7, 0xa7, 0x40, 0x9b, 0x35, 0x60, + 0xba, 0x76, 0x03, 0x06, 0x08, 0x8a, 0x4c, 0x3b, 0x42, 0x24, 0x51, 0x20, 0xa9, 0x6c, 0xbe, 0xed, + 0xbe, 0xcb, 0xbe, 0xc0, 0x30, 0x0c, 0xbb, 0xec, 0xa3, 0xec, 0x18, 0xec, 0xd4, 0x93, 0xb0, 0x38, + 0x97, 0x41, 0xa7, 0x7e, 0x84, 0x41, 0x14, 0xe9, 0x48, 0x4e, 0x8a, 0xf4, 0xd0, 0x5d, 0x04, 0xbe, + 0xf7, 0xfb, 0xbd, 0x1f, 0xc9, 0xf7, 0xc8, 0x47, 0x41, 0xdb, 0xf5, 0x84, 0x4f, 0x23, 0xbe, 0x1b, + 0x33, 0x2a, 0x28, 0x6a, 0x85, 0x74, 0x44, 0x02, 0xde, 0xdf, 0x99, 0xf8, 0xe2, 0x28, 0x39, 0xdc, + 0xf5, 0x68, 0x78, 0x6f, 0x42, 0x27, 0xf4, 0x9e, 0x84, 0x0f, 0x93, 0xb1, 0xb4, 0xa4, 0x21, 0x47, + 0x45, 0x58, 0x7f, 0x93, 0x44, 0x27, 0x3e, 0xa3, 0x51, 0x48, 0x22, 0xe1, 0x9c, 0xb8, 0xcc, 0x77, + 0x0f, 0x03, 0xa2, 0x34, 0xfb, 0x30, 0xf6, 0x03, 0x52, 0x8c, 0x07, 0x3f, 0xb7, 0xa0, 0xf5, 0x40, + 0xce, 0x88, 0x5e, 0xc0, 0xca, 0x88, 0xfe, 0x10, 0x05, 0xd4, 0x1d, 0x39, 0xc5, 0x22, 0x7a, 0xc6, + 0x96, 0xb1, 0x7d, 0xeb, 0xfe, 0xc6, 0x6e, 0xb1, 0x88, 0xdd, 0xcf, 0x15, 0x5c, 0x04, 0x0c, 0x37, + 0xb2, 0xd4, 0x46, 0x3a, 0xe4, 0x43, 0x1a, 0xfa, 0x82, 0x84, 0xb1, 0x98, 0xe2, 0xce, 0xa8, 0xc2, + 0x43, 0x4f, 0xa1, 0x9d, 0xc4, 0x65, 0xd9, 0xba, 0x94, 0x5d, 0xd3, 0xb2, 0xdf, 0xc4, 0x25, 0xd1, + 0xb5, 0x2c, 0xb5, 0x6f, 0x17, 0xf4, 0x92, 0xe4, 0x72, 0x52, 0xe2, 0xa0, 0x87, 0x00, 0x2c, 0x89, + 0xb4, 0x5a, 0x43, 0xaa, 0x75, 0xb5, 0x1a, 0x4e, 0x22, 0x25, 0xd5, 0xcd, 0x52, 0xbb, 0xcd, 0x92, + 0xa8, 0xa4, 0x63, 0x32, 0x8d, 0xa2, 0x03, 0xe8, 0x08, 0x3f, 0x24, 0x34, 0x11, 0x5a, 0xa8, 0x29, + 0x85, 0xd6, 0xb5, 0xd0, 0xb3, 0x02, 0x55, 0x62, 0xeb, 0x59, 0x6a, 0x77, 0x55, 0x40, 0x49, 0xb0, + 0x2d, 0xca, 0x2c, 0xe4, 0xc3, 0x1a, 0x09, 0x7d, 0xe1, 0xc4, 0x8c, 0x4e, 0x18, 0xe1, 0x5c, 0x4b, + 0xdf, 0x90, 0xd2, 
0x7d, 0x2d, 0xfd, 0x45, 0xe8, 0x8b, 0x7d, 0x45, 0x51, 0xfa, 0x9b, 0x59, 0x6a, + 0xdf, 0xa9, 0xc4, 0x96, 0x66, 0x41, 0xe4, 0x52, 0x40, 0x9e, 0x04, 0xc1, 0xa6, 0x7a, 0x82, 0x56, + 0x35, 0x09, 0xcf, 0xd8, 0xb4, 0x9c, 0x04, 0xc1, 0xa6, 0xe5, 0x24, 0x08, 0x8d, 0xe6, 0x35, 0x8f, + 0x5d, 0xe6, 0x06, 0x01, 0x09, 0xb4, 0xd2, 0xcd, 0x6a, 0xcd, 0xf7, 0x15, 0x5c, 0xae, 0xb9, 0x0e, + 0x29, 0xd7, 0x3c, 0xae, 0xf0, 0xf2, 0x9a, 0x73, 0xc2, 0x7c, 0x77, 0x2e, 0xbb, 0x54, 0xad, 0xf9, + 0x81, 0x04, 0xcb, 0x35, 0x2f, 0xe8, 0xe5, 0x9a, 0xf3, 0x12, 0x07, 0x79, 0x80, 0x3c, 0x3a, 0x22, + 0x31, 0x89, 0x46, 0xf9, 0x99, 0x56, 0xaa, 0xa6, 0x54, 0xbd, 0xab, 0x55, 0x1f, 0x5e, 0x30, 0x94, + 0xf4, 0xdd, 0x2c, 0xb5, 0xd7, 0x4b, 0x81, 0x25, 0xfd, 0xae, 0xb7, 0xc8, 0x1e, 0xfc, 0x5e, 0x87, + 0x4e, 0xf5, 0x90, 0xa3, 0x3e, 0x2c, 0xb9, 0x4c, 0xf8, 0x63, 0xd7, 0x13, 0xf2, 0x3a, 0x98, 0x78, + 0x6e, 0xa3, 0x77, 0xa0, 0x39, 0x66, 0x34, 0x94, 0xe7, 0xd9, 0x1c, 0x2e, 0x65, 0xa9, 0x2d, 0x6d, + 0x2c, 0xbf, 0x68, 0x03, 0xea, 0x82, 0xca, 0xd3, 0x69, 0x0e, 0x5b, 0x59, 0x6a, 0xd7, 0x05, 0xc5, + 0x75, 0x41, 0xd1, 0x07, 0x60, 0x7a, 0xae, 0x77, 0x44, 0x9c, 0x63, 0x32, 0x95, 0x67, 0xce, 0x1c, + 0xb6, 0xb3, 0xd4, 0xbe, 0x70, 0xe2, 0x25, 0x39, 0x7c, 0x4c, 0xa6, 0xe8, 0x5d, 0x80, 0x80, 0x4e, + 0x1c, 0x4e, 0x13, 0xe6, 0x11, 0x79, 0x8a, 0x4c, 0x6c, 0x06, 0x74, 0x72, 0x20, 0x1d, 0xf9, 0x02, + 0x12, 0x4e, 0x98, 0xac, 0xbe, 0x5a, 0x40, 0x6e, 0x63, 0xf9, 0x45, 0x3b, 0x80, 0xbc, 0x23, 0xe2, + 0x1d, 0xf3, 0x24, 0x74, 0xdc, 0x60, 0x42, 0x99, 0x2f, 0x8e, 0x42, 0x59, 0x5f, 0x13, 0x77, 0x35, + 0xf2, 0x40, 0x03, 0xe8, 0x3d, 0xe8, 0xcc, 0xe9, 0x27, 0x6e, 0x90, 0x10, 0x59, 0x33, 0x13, 0xb7, + 0xb5, 0xf7, 0x79, 0xee, 0x1c, 0xfc, 0x6a, 0xc0, 0x72, 0xf9, 0xc6, 0xfe, 0x0f, 0x19, 0xaa, 0xee, + 0xba, 0xf9, 0xba, 0x5d, 0xdf, 0xb8, 0x6a, 0xd7, 0x83, 0xdf, 0x1a, 0x60, 0xce, 0x7b, 0x40, 0xce, + 0x8d, 0x5d, 0x71, 0x54, 0x2c, 0xac, 0xe0, 0xe6, 0x36, 0x96, 0x5f, 0x84, 0xa0, 0xe9, 0xb2, 0x09, + 0xef, 0xd5, 0xb7, 0x1a, 0xdb, 0x26, 0x96, 0x63, 0x74, 
0x1b, 0x1a, 0x23, 0x9f, 0x15, 0xab, 0xc2, + 0xf9, 0x10, 0xed, 0x40, 0x83, 0x44, 0x27, 0xbd, 0xe6, 0x56, 0x63, 0xfb, 0xd6, 0xfd, 0xcd, 0xf9, + 0x1d, 0xbe, 0xe8, 0xb0, 0xcf, 0x55, 0x83, 0xc5, 0x39, 0x0f, 0x7d, 0x06, 0x2b, 0x8c, 0x14, 0x6b, + 0x77, 0x02, 0x3f, 0xf4, 0x05, 0x57, 0xd7, 0x7f, 0x7e, 0xa7, 0xb0, 0x82, 0x9f, 0x48, 0x14, 0x77, + 0x58, 0xc5, 0xbe, 0xa6, 0xaa, 0xd5, 0xe4, 0xdc, 0x5c, 0x4c, 0xce, 0x57, 0xb0, 0xca, 0x93, 0x38, + 0x96, 0xcd, 0x27, 0xe7, 0xd1, 0x44, 0xc4, 0x89, 0x90, 0xa5, 0x5c, 0x1a, 0xde, 0xc9, 0x52, 0xfb, + 0x2a, 0x18, 0x77, 0xb5, 0xf3, 0x09, 0x9d, 0x3c, 0x95, 0x2e, 0xf4, 0x2d, 0xac, 0x9d, 0xd0, 0x20, + 0x09, 0x89, 0x13, 0xd2, 0x24, 0x12, 0x64, 0xe4, 0xe4, 0xcf, 0x06, 0xef, 0x99, 0x32, 0x0d, 0xcb, + 0x7a, 0x2f, 0x5f, 0xfa, 0x01, 0x19, 0xf6, 0xb2, 0xd4, 0xbe, 0x92, 0x8d, 0x51, 0xe1, 0xdd, 0x2b, + 0x9c, 0x39, 0x99, 0x0f, 0xfe, 0x36, 0xa0, 0x5d, 0x69, 0xae, 0xe8, 0x7d, 0x68, 0x55, 0x5e, 0x9c, + 0x8e, 0x56, 0x2f, 0x70, 0xac, 0x50, 0xf4, 0x18, 0xd6, 0x47, 0x24, 0x66, 0xc4, 0x73, 0xf3, 0x19, + 0x74, 0xfb, 0x8e, 0xb8, 0x3c, 0x5f, 0x0d, 0xb9, 0xbd, 0xcb, 0x3d, 0xba, 0x67, 0xe0, 0xd5, 0x8b, + 0x28, 0x35, 0xf1, 0xd7, 0x7c, 0x21, 0x91, 0x8d, 0xc5, 0x44, 0xee, 0x00, 0xe8, 0x09, 0x42, 0x2e, + 0x0f, 0x61, 0x63, 0xd8, 0xc9, 0x52, 0xbb, 0xe4, 0xc5, 0xa6, 0x1a, 0xef, 0xf1, 0xc1, 0x1f, 0x75, + 0x40, 0x97, 0xdb, 0xfa, 0x1b, 0xef, 0xec, 0x13, 0x68, 0x73, 0xe1, 0x32, 0xe1, 0x84, 0x84, 0x73, + 0x77, 0x42, 0xd4, 0x8d, 0x91, 0xdd, 0xbb, 0x02, 0xe0, 0x65, 0x69, 0xee, 0x15, 0x16, 0xfa, 0x14, + 0x56, 0x78, 0xe2, 0x79, 0x79, 0x39, 0x75, 0x64, 0x71, 0x9f, 0x56, 0xb3, 0xd4, 0x5e, 0x84, 0x70, + 0x47, 0x39, 0x74, 0xf4, 0x3e, 0x6c, 0x8c, 0x5d, 0x3f, 0x48, 0x18, 0xd1, 0x14, 0x27, 0x66, 0x64, + 0xec, 0xff, 0xa8, 0xfa, 0x52, 0x3f, 0x4b, 0xed, 0xd7, 0x30, 0xf0, 0x9a, 0xf2, 0x2b, 0xad, 0x7d, + 0xe9, 0xbd, 0xa6, 0x61, 0x0d, 0x30, 0x98, 0xf3, 0xa7, 0xe9, 0x8d, 0x73, 0x53, 0xd5, 0xac, 0x2f, + 0x6a, 0x7e, 0x07, 0x9d, 0xea, 0x23, 0x85, 0xb6, 0xe1, 0xa6, 0xfa, 0x8b, 0xea, 0x19, 0xf2, 
0xb4, + 0x2e, 0x2a, 0x6b, 0xf8, 0x3a, 0xe9, 0x17, 0xb0, 0x5c, 0x7e, 0xa8, 0xde, 0x9e, 0xf0, 0xf7, 0xd0, + 0xbd, 0xf4, 0x56, 0xbd, 0x3d, 0xf5, 0x63, 0xe8, 0x54, 0x5b, 0x0c, 0xea, 0x41, 0x2b, 0xa2, 0xf9, + 0x8d, 0x94, 0xa9, 0x6e, 0x3e, 0xaa, 0x61, 0x65, 0xa3, 0x3e, 0xdc, 0x88, 0x62, 0x46, 0x3d, 0xa9, + 0xd2, 0x1c, 0xd6, 0x7b, 0xc6, 0x23, 0x03, 0x17, 0xae, 0x61, 0x17, 0x56, 0x68, 0x9c, 0xcf, 0xe8, + 0x06, 0x4e, 0x41, 0x1f, 0xde, 0x86, 0xce, 0x85, 0x4b, 0x92, 0x3e, 0x3e, 0x3d, 0xb3, 0x6a, 0x2f, + 0xcf, 0xac, 0xda, 0xab, 0x33, 0xcb, 0xf8, 0x69, 0x66, 0x19, 0x7f, 0xce, 0x2c, 0xe3, 0xaf, 0x99, + 0x65, 0x9c, 0xce, 0x2c, 0xe3, 0x9f, 0x99, 0x65, 0xfc, 0x3b, 0xb3, 0x6a, 0xaf, 0x66, 0x96, 0xf1, + 0xcb, 0xb9, 0x55, 0x3b, 0x3d, 0xb7, 0x6a, 0x2f, 0xcf, 0xad, 0xda, 0x61, 0x4b, 0xfe, 0x7e, 0x7e, + 0xf4, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x06, 0xa4, 0xbb, 0xef, 0x0a, 0x00, 0x00, +} + +func (this *Action) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Action) + if !ok { + that2, ok := that.(Action) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DownloadAction.Equal(that1.DownloadAction) { + return false + } + if !this.UploadAction.Equal(that1.UploadAction) { + return false + } + if !this.RunAction.Equal(that1.RunAction) { + return false + } + if !this.TimeoutAction.Equal(that1.TimeoutAction) { + return false + } + if !this.EmitProgressAction.Equal(that1.EmitProgressAction) { + return false + } + if !this.TryAction.Equal(that1.TryAction) { + return false + } + if !this.ParallelAction.Equal(that1.ParallelAction) { + return false + } + if !this.SerialAction.Equal(that1.SerialAction) { + return false + } + if !this.CodependentAction.Equal(that1.CodependentAction) { + return false + } + return true +} +func (this *DownloadAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := 
that.(*DownloadAction) + if !ok { + that2, ok := that.(DownloadAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Artifact != that1.Artifact { + return false + } + if this.From != that1.From { + return false + } + if this.To != that1.To { + return false + } + if this.CacheKey != that1.CacheKey { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.User != that1.User { + return false + } + if this.ChecksumAlgorithm != that1.ChecksumAlgorithm { + return false + } + if this.ChecksumValue != that1.ChecksumValue { + return false + } + return true +} +func (this *UploadAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UploadAction) + if !ok { + that2, ok := that.(UploadAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Artifact != that1.Artifact { + return false + } + if this.From != that1.From { + return false + } + if this.To != that1.To { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.User != that1.User { + return false + } + return true +} +func (this *RunAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RunAction) + if !ok { + that2, ok := that.(RunAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if len(this.Args) != len(that1.Args) { + return false + } + for i := range this.Args { + if this.Args[i] != that1.Args[i] { + return false + } + } + if this.Dir != that1.Dir { + return false + } + if len(this.Env) != len(that1.Env) { + return false + } + for i := range this.Env { + if 
!this.Env[i].Equal(that1.Env[i]) { + return false + } + } + if !this.ResourceLimits.Equal(that1.ResourceLimits) { + return false + } + if this.User != that1.User { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.SuppressLogOutput != that1.SuppressLogOutput { + return false + } + if len(this.VolumeMountedFiles) != len(that1.VolumeMountedFiles) { + return false + } + for i := range this.VolumeMountedFiles { + if !this.VolumeMountedFiles[i].Equal(that1.VolumeMountedFiles[i]) { + return false + } + } + return true +} +func (this *TimeoutAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeoutAction) + if !ok { + that2, ok := that.(TimeoutAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.DeprecatedTimeoutNs != that1.DeprecatedTimeoutNs { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.TimeoutMs != that1.TimeoutMs { + return false + } + return true +} +func (this *EmitProgressAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EmitProgressAction) + if !ok { + that2, ok := that.(EmitProgressAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.StartMessage != that1.StartMessage { + return false + } + if this.SuccessMessage != that1.SuccessMessage { + return false + } + if this.FailureMessagePrefix != that1.FailureMessagePrefix { + return false + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *TryAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := 
that.(*TryAction) + if !ok { + that2, ok := that.(TryAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *ParallelAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ParallelAction) + if !ok { + that2, ok := that.(ParallelAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Actions) != len(that1.Actions) { + return false + } + for i := range this.Actions { + if !this.Actions[i].Equal(that1.Actions[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *SerialAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SerialAction) + if !ok { + that2, ok := that.(SerialAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Actions) != len(that1.Actions) { + return false + } + for i := range this.Actions { + if !this.Actions[i].Equal(that1.Actions[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *CodependentAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CodependentAction) + if !ok { + that2, ok := that.(CodependentAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Actions) != len(that1.Actions) { + return false + } + for i := range this.Actions { + if !this.Actions[i].Equal(that1.Actions[i]) 
{ + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *ResourceLimits) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceLimits) + if !ok { + that2, ok := that.(ResourceLimits) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.OptionalNofile == nil { + if this.OptionalNofile != nil { + return false + } + } else if this.OptionalNofile == nil { + return false + } else if !this.OptionalNofile.Equal(that1.OptionalNofile) { + return false + } + if that1.OptionalNproc == nil { + if this.OptionalNproc != nil { + return false + } + } else if this.OptionalNproc == nil { + return false + } else if !this.OptionalNproc.Equal(that1.OptionalNproc) { + return false + } + return true +} +func (this *ResourceLimits_Nofile) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceLimits_Nofile) + if !ok { + that2, ok := that.(ResourceLimits_Nofile) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Nofile != that1.Nofile { + return false + } + return true +} +func (this *ResourceLimits_Nproc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceLimits_Nproc) + if !ok { + that2, ok := that.(ResourceLimits_Nproc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Nproc != that1.Nproc { + return false + } + return true +} +func (this *Action) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&models.Action{") + if this.DownloadAction != nil { + s = append(s, "DownloadAction: "+fmt.Sprintf("%#v", 
this.DownloadAction)+",\n") + } + if this.UploadAction != nil { + s = append(s, "UploadAction: "+fmt.Sprintf("%#v", this.UploadAction)+",\n") + } + if this.RunAction != nil { + s = append(s, "RunAction: "+fmt.Sprintf("%#v", this.RunAction)+",\n") + } + if this.TimeoutAction != nil { + s = append(s, "TimeoutAction: "+fmt.Sprintf("%#v", this.TimeoutAction)+",\n") + } + if this.EmitProgressAction != nil { + s = append(s, "EmitProgressAction: "+fmt.Sprintf("%#v", this.EmitProgressAction)+",\n") + } + if this.TryAction != nil { + s = append(s, "TryAction: "+fmt.Sprintf("%#v", this.TryAction)+",\n") + } + if this.ParallelAction != nil { + s = append(s, "ParallelAction: "+fmt.Sprintf("%#v", this.ParallelAction)+",\n") + } + if this.SerialAction != nil { + s = append(s, "SerialAction: "+fmt.Sprintf("%#v", this.SerialAction)+",\n") + } + if this.CodependentAction != nil { + s = append(s, "CodependentAction: "+fmt.Sprintf("%#v", this.CodependentAction)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DownloadAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&models.DownloadAction{") + s = append(s, "Artifact: "+fmt.Sprintf("%#v", this.Artifact)+",\n") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "CacheKey: "+fmt.Sprintf("%#v", this.CacheKey)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "ChecksumAlgorithm: "+fmt.Sprintf("%#v", this.ChecksumAlgorithm)+",\n") + s = append(s, "ChecksumValue: "+fmt.Sprintf("%#v", this.ChecksumValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UploadAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.UploadAction{") + s = append(s, "Artifact: "+fmt.Sprintf("%#v", 
this.Artifact)+",\n") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RunAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&models.RunAction{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Args: "+fmt.Sprintf("%#v", this.Args)+",\n") + s = append(s, "Dir: "+fmt.Sprintf("%#v", this.Dir)+",\n") + if this.Env != nil { + s = append(s, "Env: "+fmt.Sprintf("%#v", this.Env)+",\n") + } + if this.ResourceLimits != nil { + s = append(s, "ResourceLimits: "+fmt.Sprintf("%#v", this.ResourceLimits)+",\n") + } + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "SuppressLogOutput: "+fmt.Sprintf("%#v", this.SuppressLogOutput)+",\n") + if this.VolumeMountedFiles != nil { + s = append(s, "VolumeMountedFiles: "+fmt.Sprintf("%#v", this.VolumeMountedFiles)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeoutAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.TimeoutAction{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "DeprecatedTimeoutNs: "+fmt.Sprintf("%#v", this.DeprecatedTimeoutNs)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "TimeoutMs: "+fmt.Sprintf("%#v", this.TimeoutMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EmitProgressAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.EmitProgressAction{") + if this.Action 
!= nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "StartMessage: "+fmt.Sprintf("%#v", this.StartMessage)+",\n") + s = append(s, "SuccessMessage: "+fmt.Sprintf("%#v", this.SuccessMessage)+",\n") + s = append(s, "FailureMessagePrefix: "+fmt.Sprintf("%#v", this.FailureMessagePrefix)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TryAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TryAction{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ParallelAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ParallelAction{") + if this.Actions != nil { + s = append(s, "Actions: "+fmt.Sprintf("%#v", this.Actions)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SerialAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.SerialAction{") + if this.Actions != nil { + s = append(s, "Actions: "+fmt.Sprintf("%#v", this.Actions)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CodependentAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.CodependentAction{") + if this.Actions != nil { + s = append(s, "Actions: "+fmt.Sprintf("%#v", this.Actions)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*ResourceLimits) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ResourceLimits{") + if this.OptionalNofile != nil { + s = append(s, "OptionalNofile: "+fmt.Sprintf("%#v", this.OptionalNofile)+",\n") + } + if this.OptionalNproc != nil { + s = append(s, "OptionalNproc: "+fmt.Sprintf("%#v", this.OptionalNproc)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceLimits_Nofile) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ResourceLimits_Nofile{` + + `Nofile:` + fmt.Sprintf("%#v", this.Nofile) + `}`}, ", ") + return s +} +func (this *ResourceLimits_Nproc) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ResourceLimits_Nproc{` + + `Nproc:` + fmt.Sprintf("%#v", this.Nproc) + `}`}, ", ") + return s +} +func valueToGoStringActions(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Action) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Action) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Action) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CodependentAction != nil { + { + size, err := m.CodependentAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.SerialAction != nil { + { + size, err := m.SerialAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ParallelAction != nil { + { + size, err := m.ParallelAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.TryAction != nil { + { + size, err := m.TryAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.EmitProgressAction != nil { + { + size, err := m.EmitProgressAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.TimeoutAction != nil { + { + size, err := m.TimeoutAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RunAction != nil { + { + size, err := m.RunAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UploadAction != nil { + { + size, err := m.UploadAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.DownloadAction != nil { + { + size, err := m.DownloadAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownloadAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownloadAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownloadAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChecksumValue) > 0 { + i -= len(m.ChecksumValue) + copy(dAtA[i:], m.ChecksumValue) + i = encodeVarintActions(dAtA, i, uint64(len(m.ChecksumValue))) + i-- + dAtA[i] = 0x42 + } + if len(m.ChecksumAlgorithm) > 0 { + i -= len(m.ChecksumAlgorithm) + copy(dAtA[i:], m.ChecksumAlgorithm) + i = encodeVarintActions(dAtA, i, uint64(len(m.ChecksumAlgorithm))) + i-- + dAtA[i] = 0x3a + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintActions(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x32 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x2a + } + if len(m.CacheKey) > 0 { + i -= len(m.CacheKey) + copy(dAtA[i:], m.CacheKey) + i = encodeVarintActions(dAtA, i, uint64(len(m.CacheKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.To) > 0 { + i -= len(m.To) + copy(dAtA[i:], m.To) + i = encodeVarintActions(dAtA, i, uint64(len(m.To))) + i-- + dAtA[i] = 0x1a + } + if len(m.From) > 0 { + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintActions(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x12 + } + if len(m.Artifact) > 0 { + i -= len(m.Artifact) + copy(dAtA[i:], m.Artifact) + i = encodeVarintActions(dAtA, i, uint64(len(m.Artifact))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UploadAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UploadAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UploadAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + 
_ = l + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintActions(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x2a + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x22 + } + if len(m.To) > 0 { + i -= len(m.To) + copy(dAtA[i:], m.To) + i = encodeVarintActions(dAtA, i, uint64(len(m.To))) + i-- + dAtA[i] = 0x1a + } + if len(m.From) > 0 { + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintActions(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x12 + } + if len(m.Artifact) > 0 { + i -= len(m.Artifact) + copy(dAtA[i:], m.Artifact) + i = encodeVarintActions(dAtA, i, uint64(len(m.Artifact))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeMountedFiles) > 0 { + for iNdEx := len(m.VolumeMountedFiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMountedFiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.SuppressLogOutput { + i-- + if m.SuppressLogOutput { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x3a + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintActions(dAtA, i, 
uint64(len(m.User))) + i-- + dAtA[i] = 0x32 + } + if m.ResourceLimits != nil { + { + size, err := m.ResourceLimits.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Dir) > 0 { + i -= len(m.Dir) + copy(dAtA[i:], m.Dir) + i = encodeVarintActions(dAtA, i, uint64(len(m.Dir))) + i-- + dAtA[i] = 0x1a + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintActions(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintActions(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TimeoutAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeoutAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeoutAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutMs != 0 { + i = encodeVarintActions(dAtA, i, uint64(m.TimeoutMs)) + i-- + dAtA[i] = 0x20 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x1a + } + if m.DeprecatedTimeoutNs != 0 { + i = encodeVarintActions(dAtA, i, uint64(m.DeprecatedTimeoutNs)) + i-- + dAtA[i] = 0x10 + } + if m.Action 
!= nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EmitProgressAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmitProgressAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmitProgressAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x2a + } + if len(m.FailureMessagePrefix) > 0 { + i -= len(m.FailureMessagePrefix) + copy(dAtA[i:], m.FailureMessagePrefix) + i = encodeVarintActions(dAtA, i, uint64(len(m.FailureMessagePrefix))) + i-- + dAtA[i] = 0x22 + } + if len(m.SuccessMessage) > 0 { + i -= len(m.SuccessMessage) + copy(dAtA[i:], m.SuccessMessage) + i = encodeVarintActions(dAtA, i, uint64(len(m.SuccessMessage))) + i-- + dAtA[i] = 0x1a + } + if len(m.StartMessage) > 0 { + i -= len(m.StartMessage) + copy(dAtA[i:], m.StartMessage) + i = encodeVarintActions(dAtA, i, uint64(len(m.StartMessage))) + i-- + dAtA[i] = 0x12 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TryAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TryAction) MarshalTo(dAtA []byte) (int, error) 
{ + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TryAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParallelAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParallelAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParallelAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SerialAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerialAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerialAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CodependentAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CodependentAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CodependentAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceLimits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceLimits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OptionalNproc 
!= nil { + { + size := m.OptionalNproc.Size() + i -= size + if _, err := m.OptionalNproc.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.OptionalNofile != nil { + { + size := m.OptionalNofile.Size() + i -= size + if _, err := m.OptionalNofile.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceLimits_Nofile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLimits_Nofile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintActions(dAtA, i, uint64(m.Nofile)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *ResourceLimits_Nproc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLimits_Nproc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintActions(dAtA, i, uint64(m.Nproc)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func encodeVarintActions(dAtA []byte, offset int, v uint64) int { + offset -= sovActions(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Action) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DownloadAction != nil { + l = m.DownloadAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.UploadAction != nil { + l = m.UploadAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.RunAction != nil { + l = m.RunAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.TimeoutAction != nil { + l = m.TimeoutAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.EmitProgressAction != nil { + l = m.EmitProgressAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.TryAction != nil { + l = m.TryAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.ParallelAction != nil { + l = 
m.ParallelAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.SerialAction != nil { + l = m.SerialAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.CodependentAction != nil { + l = m.CodependentAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *DownloadAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Artifact) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.From) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.To) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.CacheKey) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.ChecksumAlgorithm) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.ChecksumValue) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *UploadAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Artifact) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.From) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.To) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *RunAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.Dir) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + if m.ResourceLimits != nil { + l = m.ResourceLimits.Size() + n 
+= 1 + l + sovActions(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if m.SuppressLogOutput { + n += 2 + } + if len(m.VolumeMountedFiles) > 0 { + for _, e := range m.VolumeMountedFiles { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + return n +} + +func (m *TimeoutAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.DeprecatedTimeoutNs != 0 { + n += 1 + sovActions(uint64(m.DeprecatedTimeoutNs)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if m.TimeoutMs != 0 { + n += 1 + sovActions(uint64(m.TimeoutMs)) + } + return n +} + +func (m *EmitProgressAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.StartMessage) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.SuccessMessage) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.FailureMessagePrefix) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *TryAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *ParallelAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *SerialAction) Size() (n int) { + if m == nil { + return 0 + } + var l int 
+ _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *CodependentAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *ResourceLimits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OptionalNofile != nil { + n += m.OptionalNofile.Size() + } + if m.OptionalNproc != nil { + n += m.OptionalNproc.Size() + } + return n +} + +func (m *ResourceLimits_Nofile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovActions(uint64(m.Nofile)) + return n +} +func (m *ResourceLimits_Nproc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovActions(uint64(m.Nproc)) + return n +} + +func sovActions(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozActions(x uint64) (n int) { + return sovActions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Action) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Action{`, + `DownloadAction:` + strings.Replace(this.DownloadAction.String(), "DownloadAction", "DownloadAction", 1) + `,`, + `UploadAction:` + strings.Replace(this.UploadAction.String(), "UploadAction", "UploadAction", 1) + `,`, + `RunAction:` + strings.Replace(this.RunAction.String(), "RunAction", "RunAction", 1) + `,`, + `TimeoutAction:` + strings.Replace(this.TimeoutAction.String(), "TimeoutAction", "TimeoutAction", 1) + `,`, + `EmitProgressAction:` + strings.Replace(this.EmitProgressAction.String(), "EmitProgressAction", "EmitProgressAction", 1) + `,`, + `TryAction:` + strings.Replace(this.TryAction.String(), 
"TryAction", "TryAction", 1) + `,`, + `ParallelAction:` + strings.Replace(this.ParallelAction.String(), "ParallelAction", "ParallelAction", 1) + `,`, + `SerialAction:` + strings.Replace(this.SerialAction.String(), "SerialAction", "SerialAction", 1) + `,`, + `CodependentAction:` + strings.Replace(this.CodependentAction.String(), "CodependentAction", "CodependentAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DownloadAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownloadAction{`, + `Artifact:` + fmt.Sprintf("%v", this.Artifact) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `CacheKey:` + fmt.Sprintf("%v", this.CacheKey) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `ChecksumAlgorithm:` + fmt.Sprintf("%v", this.ChecksumAlgorithm) + `,`, + `ChecksumValue:` + fmt.Sprintf("%v", this.ChecksumValue) + `,`, + `}`, + }, "") + return s +} +func (this *UploadAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UploadAction{`, + `Artifact:` + fmt.Sprintf("%v", this.Artifact) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `}`, + }, "") + return s +} +func (this *RunAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]*EnvironmentVariable{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(fmt.Sprintf("%v", f), "EnvironmentVariable", "EnvironmentVariable", 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMountedFiles := "[]*File{" + for _, f := range this.VolumeMountedFiles { + repeatedStringForVolumeMountedFiles += strings.Replace(fmt.Sprintf("%v", f), "File", "File", 1) + "," + } + repeatedStringForVolumeMountedFiles 
+= "}" + s := strings.Join([]string{`&RunAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Dir:` + fmt.Sprintf("%v", this.Dir) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ResourceLimits:` + strings.Replace(this.ResourceLimits.String(), "ResourceLimits", "ResourceLimits", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `SuppressLogOutput:` + fmt.Sprintf("%v", this.SuppressLogOutput) + `,`, + `VolumeMountedFiles:` + repeatedStringForVolumeMountedFiles + `,`, + `}`, + }, "") + return s +} +func (this *TimeoutAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeoutAction{`, + `Action:` + strings.Replace(this.Action.String(), "Action", "Action", 1) + `,`, + `DeprecatedTimeoutNs:` + fmt.Sprintf("%v", this.DeprecatedTimeoutNs) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `TimeoutMs:` + fmt.Sprintf("%v", this.TimeoutMs) + `,`, + `}`, + }, "") + return s +} +func (this *EmitProgressAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmitProgressAction{`, + `Action:` + strings.Replace(this.Action.String(), "Action", "Action", 1) + `,`, + `StartMessage:` + fmt.Sprintf("%v", this.StartMessage) + `,`, + `SuccessMessage:` + fmt.Sprintf("%v", this.SuccessMessage) + `,`, + `FailureMessagePrefix:` + fmt.Sprintf("%v", this.FailureMessagePrefix) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *TryAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TryAction{`, + `Action:` + strings.Replace(this.Action.String(), "Action", "Action", 1) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *ParallelAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForActions := 
"[]*Action{" + for _, f := range this.Actions { + repeatedStringForActions += strings.Replace(f.String(), "Action", "Action", 1) + "," + } + repeatedStringForActions += "}" + s := strings.Join([]string{`&ParallelAction{`, + `Actions:` + repeatedStringForActions + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *SerialAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForActions := "[]*Action{" + for _, f := range this.Actions { + repeatedStringForActions += strings.Replace(f.String(), "Action", "Action", 1) + "," + } + repeatedStringForActions += "}" + s := strings.Join([]string{`&SerialAction{`, + `Actions:` + repeatedStringForActions + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *CodependentAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForActions := "[]*Action{" + for _, f := range this.Actions { + repeatedStringForActions += strings.Replace(f.String(), "Action", "Action", 1) + "," + } + repeatedStringForActions += "}" + s := strings.Join([]string{`&CodependentAction{`, + `Actions:` + repeatedStringForActions + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceLimits) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceLimits{`, + `OptionalNofile:` + fmt.Sprintf("%v", this.OptionalNofile) + `,`, + `OptionalNproc:` + fmt.Sprintf("%v", this.OptionalNproc) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceLimits_Nofile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceLimits_Nofile{`, + `Nofile:` + fmt.Sprintf("%v", this.Nofile) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceLimits_Nproc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceLimits_Nproc{`, + `Nproc:` + 
fmt.Sprintf("%v", this.Nproc) + `,`, + `}`, + }, "") + return s +} +func valueToStringActions(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Action) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Action: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Action: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownloadAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownloadAction == nil { + m.DownloadAction = &DownloadAction{} + } + if err := m.DownloadAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UploadAction == nil { + m.UploadAction = &UploadAction{} + } + if err := m.UploadAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAction == nil { + m.RunAction = &RunAction{} + } + if err := m.RunAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeoutAction == nil { + m.TimeoutAction = &TimeoutAction{} + } + if err := m.TimeoutAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field EmitProgressAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmitProgressAction == nil { + m.EmitProgressAction = &EmitProgressAction{} + } + if err := m.EmitProgressAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TryAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TryAction == nil { + m.TryAction = &TryAction{} + } + if err := m.TryAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParallelAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.ParallelAction == nil { + m.ParallelAction = &ParallelAction{} + } + if err := m.ParallelAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SerialAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SerialAction == nil { + m.SerialAction = &SerialAction{} + } + if err := m.SerialAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodependentAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CodependentAction == nil { + m.CodependentAction = &CodependentAction{} + } + if err := m.CodependentAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownloadAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownloadAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownloadAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifact = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CacheKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumAlgorithm", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumAlgorithm = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UploadAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UploadAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UploadAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifact = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, &EnvironmentVariable{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceLimits == nil { + m.ResourceLimits = &ResourceLimits{} + } + if err := m.ResourceLimits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuppressLogOutput", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuppressLogOutput = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMountedFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMountedFiles = append(m.VolumeMountedFiles, &File{}) + if err := m.VolumeMountedFiles[len(m.VolumeMountedFiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + 
} + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeoutAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeoutAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeoutAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedTimeoutNs", wireType) + } + m.DeprecatedTimeoutNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedTimeoutNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutMs", wireType) + } + m.TimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmitProgressAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmitProgressAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
EmitProgressAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SuccessMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureMessagePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureMessagePrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TryAction) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TryAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TryAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParallelAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParallelAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParallelAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &Action{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerialAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerialAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerialAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &Action{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CodependentAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CodependentAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CodependentAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &Action{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nofile", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalNofile = &ResourceLimits_Nofile{v} + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nproc", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalNproc = &ResourceLimits_Nproc{v} + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipActions(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 
0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthActions + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupActions + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthActions + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthActions = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowActions = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupActions = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/actions.proto b/vendor/code.cloudfoundry.org/bbs/models/actions.proto new file mode 100644 index 00000000..ab462358 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actions.proto @@ -0,0 +1,104 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "environment_variables.proto"; +import "file.proto"; + +message Action { + // Note: we only expect one of the following set of fields to be + // set. Previously we used `option (gogoproto.onlyone) = true' but since this + // is now deprecated and oneof introduces a lot of structural changes, we + // deferred on switching to oneof for now until there is a good reason for it. 
+ // disadvantages of using multiple optionals as opposed to oneof are: + // - less memory usage + // disadvantages of using multiple optionals without onlyone: + // - writing our own GetAction/SetAction methods + // action oneof { + DownloadAction download_action = 1 [(gogoproto.jsontag) = "download,omitempty"]; + UploadAction upload_action = 2 [(gogoproto.jsontag) = "upload,omitempty"]; + RunAction run_action = 3 [(gogoproto.jsontag) = "run,omitempty"]; + TimeoutAction timeout_action = 4 [(gogoproto.jsontag) = "timeout,omitempty"]; + EmitProgressAction emit_progress_action = 5 [(gogoproto.jsontag) = "emit_progress,omitempty"]; + TryAction try_action = 6 [(gogoproto.jsontag) = "try,omitempty"]; + ParallelAction parallel_action = 7 [(gogoproto.jsontag) = "parallel,omitempty"]; + SerialAction serial_action = 8 [(gogoproto.jsontag) = "serial,omitempty"]; + CodependentAction codependent_action = 9 [(gogoproto.jsontag) = "codependent,omitempty"]; + // } +} + +message DownloadAction { + string artifact = 1; + string from = 2 [(gogoproto.jsontag) = "from"]; + string to = 3 [(gogoproto.jsontag) = "to"]; + string cache_key = 4 [(gogoproto.jsontag) = "cache_key"]; + string log_source = 5; + string user = 6 [(gogoproto.jsontag) = "user"] ; + string checksum_algorithm = 7; + string checksum_value = 8; +} + +message UploadAction { + string artifact = 1; + string from = 2 [(gogoproto.jsontag) = "from"]; + string to = 3 [(gogoproto.jsontag) = "to"]; + string log_source = 4; + string user = 5 [(gogoproto.jsontag) = "user"]; +} + +message RunAction { + string path = 1 [(gogoproto.jsontag) = "path"]; + repeated string args = 2; + string dir = 3; + repeated EnvironmentVariable env = 4; + ResourceLimits resource_limits = 5; + string user = 6 [(gogoproto.jsontag) = "user"]; + string log_source = 7; + bool suppress_log_output = 8 [(gogoproto.jsontag) = "suppress_log_output"]; + repeated File volume_mounted_files = 9 [(gogoproto.jsontag) = "volume_mounted_files"]; +} + +message 
TimeoutAction { + Action action = 1; + int64 deprecated_timeout_ns = 2 [(gogoproto.jsontag) = "timeout,omitempty", deprecated=true]; + string log_source = 3; + int64 timeout_ms = 4 [(gogoproto.jsontag) = "timeout_ms"]; +} + +message EmitProgressAction { + Action action = 1; + string start_message = 2 [(gogoproto.jsontag) = "start_message"]; + string success_message = 3 [(gogoproto.jsontag) = "success_message"]; + string failure_message_prefix = 4 [(gogoproto.jsontag) = "failure_message_prefix"]; + string log_source = 5; +} + +message TryAction { + Action action = 1; + string log_source = 2; +} + +message ParallelAction { + repeated Action actions = 1; + string log_source = 2; +} + +message SerialAction { + repeated Action actions = 1; + string log_source = 2; +} + +message CodependentAction { + repeated Action actions = 1; + string log_source = 2; +} + +message ResourceLimits { + oneof optional_nofile { + uint64 nofile = 1; + } + oneof optional_nproc { + uint64 nproc = 2 [deprecated=true]; + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go new file mode 100644 index 00000000..9ad655b5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go @@ -0,0 +1,521 @@ +package models + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "code.cloudfoundry.org/bbs/format" +) + +const ( + ActualLRPStateUnclaimed = "UNCLAIMED" + ActualLRPStateClaimed = "CLAIMED" + ActualLRPStateRunning = "RUNNING" + ActualLRPStateCrashed = "CRASHED" + + CrashResetTimeout = 5 * time.Minute + RetireActualLRPRetryAttempts = 5 +) + +var ActualLRPStates = []string{ + ActualLRPStateUnclaimed, + ActualLRPStateClaimed, + ActualLRPStateRunning, + ActualLRPStateCrashed, +} + +// Deprecated: use the ActualLRPInstances API instead +type ActualLRPChange struct { + Before *ActualLRPGroup + After *ActualLRPGroup +} + +type ActualLRPFilter struct { + Domain string + CellID string + ProcessGuid 
string + Index *int32 +} + +func NewActualLRPKey(processGuid string, index int32, domain string) ActualLRPKey { + return ActualLRPKey{processGuid, index, domain} +} + +func NewActualLRPInstanceKey(instanceGuid string, cellId string) ActualLRPInstanceKey { + return ActualLRPInstanceKey{instanceGuid, cellId} +} + +func NewActualLRPNetInfo(address string, instanceAddress string, preferredAddress ActualLRPNetInfo_PreferredAddress, ports ...*PortMapping) ActualLRPNetInfo { + return ActualLRPNetInfo{address, ports, instanceAddress, preferredAddress} +} + +func EmptyActualLRPNetInfo() ActualLRPNetInfo { + return NewActualLRPNetInfo("", "", ActualLRPNetInfo_PreferredAddressUnknown) +} + +func (info ActualLRPNetInfo) Empty() bool { + return info.Address == "" && len(info.Ports) == 0 && info.PreferredAddress == ActualLRPNetInfo_PreferredAddressUnknown +} + +func (*ActualLRPNetInfo) Version() format.Version { + return format.V0 +} + +func (d *ActualLRPNetInfo_PreferredAddress) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ActualLRPNetInfo_PreferredAddress_value[name]; found { + *d = ActualLRPNetInfo_PreferredAddress(v) + return nil + } + return fmt.Errorf("invalid preferred address: %s", name) +} + +func (d ActualLRPNetInfo_PreferredAddress) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +func NewPortMapping(hostPort, containerPort uint32) *PortMapping { + return &PortMapping{ + HostPort: hostPort, + ContainerPort: containerPort, + } +} + +func NewPortMappingWithTLSProxy(hostPort, containerPort, tlsHost, tlsContainer uint32) *PortMapping { + return &PortMapping{ + HostPort: hostPort, + ContainerPort: containerPort, + ContainerTlsProxyPort: tlsContainer, + HostTlsProxyPort: tlsHost, + } +} + +func (key ActualLRPInstanceKey) Empty() bool { + return key.InstanceGuid == "" && key.CellId == "" +} +func (a *ActualLRP) Copy() *ActualLRP { + newActualLRP := *a + 
return &newActualLRP +} + +const StaleUnclaimedActualLRPDuration = 30 * time.Second + +func (actual ActualLRP) ShouldStartUnclaimed(now time.Time) bool { + if actual.State != ActualLRPStateUnclaimed { + return false + } + + if now.Sub(time.Unix(0, actual.Since)) > StaleUnclaimedActualLRPDuration { + return true + } + + return false +} + +func (actual ActualLRP) CellIsMissing(cellSet CellSet) bool { + if actual.State == ActualLRPStateUnclaimed || + actual.State == ActualLRPStateCrashed { + return false + } + + return !cellSet.HasCellID(actual.CellId) +} + +func (actual ActualLRP) ShouldRestartImmediately(calc RestartCalculator) bool { + if actual.State != ActualLRPStateCrashed { + return false + } + + return calc.ShouldRestart(0, 0, actual.CrashCount) +} + +func (actual ActualLRP) ShouldRestartCrash(now time.Time, calc RestartCalculator) bool { + if actual.State != ActualLRPStateCrashed { + return false + } + + return calc.ShouldRestart(now.UnixNano(), actual.Since, actual.CrashCount) +} + +func (actual *ActualLRP) SetRoutable(routable bool) { + actual.OptionalRoutable = &ActualLRP_Routable{ + Routable: routable, + } +} + +func (actual *ActualLRP) RoutableExists() bool { + _, ok := actual.GetOptionalRoutable().(*ActualLRP_Routable) + return ok +} + +func (before ActualLRP) AllowsTransitionTo(lrpKey *ActualLRPKey, instanceKey *ActualLRPInstanceKey, newState string) bool { + if !before.ActualLRPKey.Equal(lrpKey) { + return false + } + + var valid bool + switch before.State { + case ActualLRPStateUnclaimed: + valid = newState == ActualLRPStateUnclaimed || + newState == ActualLRPStateClaimed || + newState == ActualLRPStateRunning + case ActualLRPStateClaimed: + valid = newState == ActualLRPStateUnclaimed && instanceKey.Empty() || + newState == ActualLRPStateClaimed && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateRunning || + newState == ActualLRPStateCrashed && before.ActualLRPInstanceKey.Equal(instanceKey) + case ActualLRPStateRunning: 
+ valid = newState == ActualLRPStateUnclaimed && instanceKey.Empty() || + newState == ActualLRPStateClaimed && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateRunning && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateCrashed && before.ActualLRPInstanceKey.Equal(instanceKey) + case ActualLRPStateCrashed: + valid = newState == ActualLRPStateUnclaimed && instanceKey.Empty() || + newState == ActualLRPStateClaimed && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateRunning && before.ActualLRPInstanceKey.Equal(instanceKey) + } + + return valid +} + +func (d *ActualLRP_Presence) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ActualLRP_Presence_value[name]; found { + *d = ActualLRP_Presence(v) + return nil + } + return fmt.Errorf("invalid presence: %s", name) +} + +func (d ActualLRP_Presence) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// Deprecated: use the ActualLRPInstances API instead +func NewRunningActualLRPGroup(actualLRP *ActualLRP) *ActualLRPGroup { + return &ActualLRPGroup{ + Instance: actualLRP, + } +} + +// Deprecated: use the ActualLRPInstances API instead +func NewEvacuatingActualLRPGroup(actualLRP *ActualLRP) *ActualLRPGroup { + return &ActualLRPGroup{ + Evacuating: actualLRP, + } +} + +// Deprecated: use the ActualLRPInstances API instead +func (group ActualLRPGroup) Resolve() (*ActualLRP, bool, error) { + switch { + case group.Instance == nil && group.Evacuating == nil: + return nil, false, ErrActualLRPGroupInvalid + + case group.Instance == nil: + return group.Evacuating, true, nil + + case group.Evacuating == nil: + return group.Instance, false, nil + + case group.Instance.State == ActualLRPStateRunning || group.Instance.State == ActualLRPStateCrashed: + return group.Instance, false, nil + + default: + return group.Evacuating, true, nil + } +} 
+ +func NewUnclaimedActualLRP(lrpKey ActualLRPKey, since int64) *ActualLRP { + return &ActualLRP{ + ActualLRPKey: lrpKey, + State: ActualLRPStateUnclaimed, + Since: since, + } +} + +func NewClaimedActualLRP(lrpKey ActualLRPKey, instanceKey ActualLRPInstanceKey, since int64) *ActualLRP { + return &ActualLRP{ + ActualLRPKey: lrpKey, + ActualLRPInstanceKey: instanceKey, + State: ActualLRPStateClaimed, + Since: since, + } +} + +func NewRunningActualLRP(lrpKey ActualLRPKey, instanceKey ActualLRPInstanceKey, netInfo ActualLRPNetInfo, since int64) *ActualLRP { + return &ActualLRP{ + ActualLRPKey: lrpKey, + ActualLRPInstanceKey: instanceKey, + ActualLRPNetInfo: netInfo, + State: ActualLRPStateRunning, + Since: since, + } +} + +func (*ActualLRP) Version() format.Version { + return format.V0 +} + +func (actualLRPInfo *ActualLRPInfo) ToActualLRP(lrpKey ActualLRPKey, lrpInstanceKey ActualLRPInstanceKey) *ActualLRP { + if actualLRPInfo == nil { + return nil + } + lrp := ActualLRP{ + ActualLRPKey: lrpKey, + ActualLRPInstanceKey: lrpInstanceKey, + ActualLRPNetInfo: actualLRPInfo.ActualLRPNetInfo, + AvailabilityZone: actualLRPInfo.AvailabilityZone, + CrashCount: actualLRPInfo.CrashCount, + CrashReason: actualLRPInfo.CrashReason, + State: actualLRPInfo.State, + PlacementError: actualLRPInfo.PlacementError, + Since: actualLRPInfo.Since, + ModificationTag: actualLRPInfo.ModificationTag, + Presence: actualLRPInfo.Presence, + } + + if actualLRPInfo.RoutableExists() { + lrp.SetRoutable(actualLRPInfo.GetRoutable()) + } + + return &lrp +} + +func (actual *ActualLRP) ToActualLRPInfo() *ActualLRPInfo { + if actual == nil { + return nil + } + info := ActualLRPInfo{ + ActualLRPNetInfo: actual.ActualLRPNetInfo, + AvailabilityZone: actual.AvailabilityZone, + CrashCount: actual.CrashCount, + CrashReason: actual.CrashReason, + State: actual.State, + PlacementError: actual.PlacementError, + Since: actual.Since, + ModificationTag: actual.ModificationTag, + Presence: actual.Presence, + } + + if 
actual.RoutableExists() { + info.SetRoutable(actual.GetRoutable()) + } + return &info +} + +// Deprecated: use the ActualLRPInstances API instead +func (actual *ActualLRP) ToActualLRPGroup() *ActualLRPGroup { + if actual == nil { + return nil + } + + switch actual.Presence { + case ActualLRP_Evacuating: + return &ActualLRPGroup{Evacuating: actual} + default: + return &ActualLRPGroup{Instance: actual} + } +} + +func (actual ActualLRP) Validate() error { + var validationError ValidationError + + err := actual.ActualLRPKey.Validate() + if err != nil { + validationError = validationError.Append(err) + } + + if actual.Since == 0 { + validationError = validationError.Append(ErrInvalidField{"since"}) + } + + switch actual.State { + case ActualLRPStateUnclaimed: + if !actual.ActualLRPInstanceKey.Empty() { + validationError = validationError.Append(errors.New("instance key cannot be set when state is unclaimed")) + } + if !actual.ActualLRPNetInfo.Empty() { + validationError = validationError.Append(errors.New("net info cannot be set when state is unclaimed")) + } + if actual.Presence != ActualLRP_Ordinary { + validationError = validationError.Append(errors.New("presence cannot be set when state is unclaimed")) + } + + case ActualLRPStateClaimed: + if err := actual.ActualLRPInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + if !actual.ActualLRPNetInfo.Empty() { + validationError = validationError.Append(errors.New("net info cannot be set when state is claimed")) + } + if strings.TrimSpace(actual.PlacementError) != "" { + validationError = validationError.Append(errors.New("placement error cannot be set when state is claimed")) + } + + case ActualLRPStateRunning: + if err := actual.ActualLRPInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + if err := actual.ActualLRPNetInfo.Validate(); err != nil { + validationError = validationError.Append(err) + } + if strings.TrimSpace(actual.PlacementError) != 
"" { + validationError = validationError.Append(errors.New("placement error cannot be set when state is running")) + } + + case ActualLRPStateCrashed: + if !actual.ActualLRPInstanceKey.Empty() { + validationError = validationError.Append(errors.New("instance key cannot be set when state is crashed")) + } + if !actual.ActualLRPNetInfo.Empty() { + validationError = validationError.Append(errors.New("net info cannot be set when state is crashed")) + } + if strings.TrimSpace(actual.PlacementError) != "" { + validationError = validationError.Append(errors.New("placement error cannot be set when state is crashed")) + } + + default: + validationError = validationError.Append(ErrInvalidField{"state"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (key *ActualLRPKey) Validate() error { + var validationError ValidationError + + if key.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if key.Index < 0 { + validationError = validationError.Append(ErrInvalidField{"index"}) + } + + if key.Domain == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (key *ActualLRPNetInfo) Validate() error { + var validationError ValidationError + + if key.Address == "" { + return validationError.Append(ErrInvalidField{"address"}) + } + + return nil +} + +func (key *ActualLRPInstanceKey) Validate() error { + var validationError ValidationError + + if key.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if key.InstanceGuid == "" { + validationError = validationError.Append(ErrInvalidField{"instance_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +// hasHigherPriority returns true if lrp1 takes precendence over lrp2 +func hasHigherPriority(lrp1, lrp2 *ActualLRP) bool { + if lrp1 == nil { + 
return false + } + + if lrp2 == nil { + return true + } + + if lrp1.Presence == ActualLRP_Ordinary { + switch lrp1.State { + case ActualLRPStateRunning: + return true + case ActualLRPStateClaimed: + return lrp2.State != ActualLRPStateRunning && lrp2.State != ActualLRPStateClaimed + } + } else if lrp1.Presence == ActualLRP_Suspect { + switch lrp1.State { + case ActualLRPStateRunning: + return lrp2.State != ActualLRPStateRunning + case ActualLRPStateClaimed: + return lrp2.State != ActualLRPStateRunning + } + } + // Cases where we are comparing two LRPs with the same presence have undefined behavior since it shouldn't happen + // with the way they're stored in the database + return false +} + +// ResolveActualLRPGroups convert the given set of lrp instances into +// ActualLRPGroup. This conversion is lossy. A suspect LRP is given +// precendence over an Ordinary instance if it is Running. Otherwise, the +// Ordinary instance is returned in the Instance field of the ActualLRPGroup. +// Deprecated: use the ActualLRPInstances API instead +func ResolveActualLRPGroups(lrps []*ActualLRP) []*ActualLRPGroup { + mapOfGroups := map[ActualLRPKey]*ActualLRPGroup{} + result := []*ActualLRPGroup{} + for _, actualLRP := range lrps { + // Every actual LRP has potentially 2 rows in the database: one for the instance + // one for the evacuating. When building the list of actual LRP groups (where + // a group is the instance and corresponding evacuating), make sure we don't add the same + // actual lrp twice. 
+ if mapOfGroups[actualLRP.ActualLRPKey] == nil { + mapOfGroups[actualLRP.ActualLRPKey] = &ActualLRPGroup{} + result = append(result, mapOfGroups[actualLRP.ActualLRPKey]) + } + if actualLRP.Presence == ActualLRP_Evacuating { + mapOfGroups[actualLRP.ActualLRPKey].Evacuating = actualLRP + } else if hasHigherPriority(actualLRP, mapOfGroups[actualLRP.ActualLRPKey].Instance) { + mapOfGroups[actualLRP.ActualLRPKey].Instance = actualLRP + } + } + + return result +} + +// ResolveToActualLRPGroup calls ResolveActualLRPGroups and return the first +// LRP group. It panics if there are more than one group. If there no LRP +// groups were returned by ResolveActualLRPGroups, then an empty ActualLRPGroup +// is returned. +// Deprecated: use the ActualLRPInstances API instead +func ResolveActualLRPGroup(lrps []*ActualLRP) *ActualLRPGroup { + actualLRPGroups := ResolveActualLRPGroups(lrps) + switch len(actualLRPGroups) { + case 0: + return &ActualLRPGroup{} + case 1: + return actualLRPGroups[0] + default: + panic("shouldn't get here") + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go new file mode 100644 index 00000000..e9c46897 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go @@ -0,0 +1,3220 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: actual_lrp.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ActualLRPNetInfo_PreferredAddress int32 + +const ( + ActualLRPNetInfo_PreferredAddressUnknown ActualLRPNetInfo_PreferredAddress = 0 + ActualLRPNetInfo_PreferredAddressInstance ActualLRPNetInfo_PreferredAddress = 1 + ActualLRPNetInfo_PreferredAddressHost ActualLRPNetInfo_PreferredAddress = 2 +) + +var ActualLRPNetInfo_PreferredAddress_name = map[int32]string{ + 0: "UNKNOWN", + 1: "INSTANCE", + 2: "HOST", +} + +var ActualLRPNetInfo_PreferredAddress_value = map[string]int32{ + "UNKNOWN": 0, + "INSTANCE": 1, + "HOST": 2, +} + +func (ActualLRPNetInfo_PreferredAddress) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{4, 0} +} + +type ActualLRP_Presence int32 + +const ( + ActualLRP_Ordinary ActualLRP_Presence = 0 + ActualLRP_Evacuating ActualLRP_Presence = 1 + ActualLRP_Suspect ActualLRP_Presence = 2 +) + +var ActualLRP_Presence_name = map[int32]string{ + 0: "ORDINARY", + 1: "EVACUATING", + 2: "SUSPECT", +} + +var ActualLRP_Presence_value = map[string]int32{ + "ORDINARY": 0, + "EVACUATING": 1, + "SUSPECT": 2, +} + +func (ActualLRP_Presence) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{6, 0} +} + +// Deprecated: Do not use. 
+type ActualLRPGroup struct { + Instance *ActualLRP `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"` + Evacuating *ActualLRP `protobuf:"bytes,2,opt,name=evacuating,proto3" json:"evacuating,omitempty"` +} + +func (m *ActualLRPGroup) Reset() { *m = ActualLRPGroup{} } +func (*ActualLRPGroup) ProtoMessage() {} +func (*ActualLRPGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{0} +} +func (m *ActualLRPGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroup.Merge(m, src) +} +func (m *ActualLRPGroup) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroup) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroup proto.InternalMessageInfo + +func (m *ActualLRPGroup) GetInstance() *ActualLRP { + if m != nil { + return m.Instance + } + return nil +} + +func (m *ActualLRPGroup) GetEvacuating() *ActualLRP { + if m != nil { + return m.Evacuating + } + return nil +} + +type PortMapping struct { + ContainerPort uint32 `protobuf:"varint,1,opt,name=container_port,json=containerPort,proto3" json:"container_port"` + HostPort uint32 `protobuf:"varint,2,opt,name=host_port,json=hostPort,proto3" json:"host_port"` + ContainerTlsProxyPort uint32 `protobuf:"varint,3,opt,name=container_tls_proxy_port,json=containerTlsProxyPort,proto3" json:"container_tls_proxy_port"` + HostTlsProxyPort uint32 `protobuf:"varint,4,opt,name=host_tls_proxy_port,json=hostTlsProxyPort,proto3" json:"host_tls_proxy_port"` +} + +func (m *PortMapping) Reset() { *m = PortMapping{} } +func 
(*PortMapping) ProtoMessage() {} +func (*PortMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{1} +} +func (m *PortMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortMapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortMapping.Merge(m, src) +} +func (m *PortMapping) XXX_Size() int { + return m.Size() +} +func (m *PortMapping) XXX_DiscardUnknown() { + xxx_messageInfo_PortMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_PortMapping proto.InternalMessageInfo + +func (m *PortMapping) GetContainerPort() uint32 { + if m != nil { + return m.ContainerPort + } + return 0 +} + +func (m *PortMapping) GetHostPort() uint32 { + if m != nil { + return m.HostPort + } + return 0 +} + +func (m *PortMapping) GetContainerTlsProxyPort() uint32 { + if m != nil { + return m.ContainerTlsProxyPort + } + return 0 +} + +func (m *PortMapping) GetHostTlsProxyPort() uint32 { + if m != nil { + return m.HostTlsProxyPort + } + return 0 +} + +type ActualLRPKey struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain"` +} + +func (m *ActualLRPKey) Reset() { *m = ActualLRPKey{} } +func (*ActualLRPKey) ProtoMessage() {} +func (*ActualLRPKey) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{2} +} +func (m *ActualLRPKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_ActualLRPKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPKey.Merge(m, src) +} +func (m *ActualLRPKey) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPKey) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPKey.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPKey proto.InternalMessageInfo + +func (m *ActualLRPKey) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ActualLRPKey) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ActualLRPKey) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +type ActualLRPInstanceKey struct { + InstanceGuid string `protobuf:"bytes,1,opt,name=instance_guid,json=instanceGuid,proto3" json:"instance_guid"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *ActualLRPInstanceKey) Reset() { *m = ActualLRPInstanceKey{} } +func (*ActualLRPInstanceKey) ProtoMessage() {} +func (*ActualLRPInstanceKey) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{3} +} +func (m *ActualLRPInstanceKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceKey.Merge(m, src) +} +func (m *ActualLRPInstanceKey) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceKey) XXX_DiscardUnknown() { + 
xxx_messageInfo_ActualLRPInstanceKey.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceKey proto.InternalMessageInfo + +func (m *ActualLRPInstanceKey) GetInstanceGuid() string { + if m != nil { + return m.InstanceGuid + } + return "" +} + +func (m *ActualLRPInstanceKey) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type ActualLRPNetInfo struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address"` + Ports []*PortMapping `protobuf:"bytes,2,rep,name=ports,proto3" json:"ports"` + InstanceAddress string `protobuf:"bytes,3,opt,name=instance_address,json=instanceAddress,proto3" json:"instance_address,omitempty"` + PreferredAddress ActualLRPNetInfo_PreferredAddress `protobuf:"varint,4,opt,name=preferred_address,json=preferredAddress,proto3,enum=models.ActualLRPNetInfo_PreferredAddress" json:"preferred_address"` +} + +func (m *ActualLRPNetInfo) Reset() { *m = ActualLRPNetInfo{} } +func (*ActualLRPNetInfo) ProtoMessage() {} +func (*ActualLRPNetInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{4} +} +func (m *ActualLRPNetInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPNetInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPNetInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPNetInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPNetInfo.Merge(m, src) +} +func (m *ActualLRPNetInfo) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPNetInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPNetInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPNetInfo proto.InternalMessageInfo + +func (m *ActualLRPNetInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *ActualLRPNetInfo) 
GetPorts() []*PortMapping { + if m != nil { + return m.Ports + } + return nil +} + +func (m *ActualLRPNetInfo) GetInstanceAddress() string { + if m != nil { + return m.InstanceAddress + } + return "" +} + +func (m *ActualLRPNetInfo) GetPreferredAddress() ActualLRPNetInfo_PreferredAddress { + if m != nil { + return m.PreferredAddress + } + return ActualLRPNetInfo_PreferredAddressUnknown +} + +type ActualLRPInternalRoute struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname"` +} + +func (m *ActualLRPInternalRoute) Reset() { *m = ActualLRPInternalRoute{} } +func (*ActualLRPInternalRoute) ProtoMessage() {} +func (*ActualLRPInternalRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{5} +} +func (m *ActualLRPInternalRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInternalRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInternalRoute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInternalRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInternalRoute.Merge(m, src) +} +func (m *ActualLRPInternalRoute) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInternalRoute) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInternalRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInternalRoute proto.InternalMessageInfo + +func (m *ActualLRPInternalRoute) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +type ActualLRP struct { + ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3,embedded=actual_lrp_key" json:""` + ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3,embedded=actual_lrp_instance_key" json:""` + ActualLRPNetInfo 
`protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3,embedded=actual_lrp_net_info" json:""` + CrashCount int32 `protobuf:"varint,4,opt,name=crash_count,json=crashCount,proto3" json:"crash_count"` + CrashReason string `protobuf:"bytes,5,opt,name=crash_reason,json=crashReason,proto3" json:"crash_reason,omitempty"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state"` + PlacementError string `protobuf:"bytes,7,opt,name=placement_error,json=placementError,proto3" json:"placement_error,omitempty"` + Since int64 `protobuf:"varint,8,opt,name=since,proto3" json:"since"` + ModificationTag ModificationTag `protobuf:"bytes,9,opt,name=modification_tag,json=modificationTag,proto3" json:"modification_tag"` + Presence ActualLRP_Presence `protobuf:"varint,10,opt,name=presence,proto3,enum=models.ActualLRP_Presence" json:"presence"` + ActualLrpInternalRoutes []*ActualLRPInternalRoute `protobuf:"bytes,11,rep,name=actual_lrp_internal_routes,json=actualLrpInternalRoutes,proto3" json:"actual_lrp_internal_routes,omitempty"` + MetricTags map[string]string `protobuf:"bytes,12,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to OptionalRoutable: + // *ActualLRP_Routable + OptionalRoutable isActualLRP_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,14,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *ActualLRP) Reset() { *m = ActualLRP{} } +func (*ActualLRP) ProtoMessage() {} +func (*ActualLRP) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{6} +} +func (m *ActualLRP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRP.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRP) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRP.Merge(m, src) +} +func (m *ActualLRP) XXX_Size() int { + return m.Size() +} +func (m *ActualLRP) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRP.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRP proto.InternalMessageInfo + +type isActualLRP_OptionalRoutable interface { + isActualLRP_OptionalRoutable() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ActualLRP_Routable struct { + Routable bool `protobuf:"varint,13,opt,name=routable,proto3,oneof" json:"routable"` +} + +func (*ActualLRP_Routable) isActualLRP_OptionalRoutable() {} + +func (m *ActualLRP) GetOptionalRoutable() isActualLRP_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *ActualLRP) GetCrashCount() int32 { + if m != nil { + return m.CrashCount + } + return 0 +} + +func (m *ActualLRP) GetCrashReason() string { + if m != nil { + return m.CrashReason + } + return "" +} + +func (m *ActualLRP) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *ActualLRP) GetPlacementError() string { + if m != nil { + return m.PlacementError + } + return "" +} + +func (m *ActualLRP) GetSince() int64 { + if m != nil { + return m.Since + } + return 0 +} + +func (m *ActualLRP) GetModificationTag() ModificationTag { + if m != nil { + return m.ModificationTag + } + return ModificationTag{} +} + +func (m *ActualLRP) GetPresence() ActualLRP_Presence { + if m != nil { + return m.Presence + } + return ActualLRP_Ordinary +} + +func (m *ActualLRP) GetActualLrpInternalRoutes() []*ActualLRPInternalRoute { + if m != nil { + return m.ActualLrpInternalRoutes + } + return nil +} + +func (m *ActualLRP) GetMetricTags() map[string]string { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m 
*ActualLRP) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*ActualLRP_Routable); ok { + return x.Routable + } + return false +} + +func (m *ActualLRP) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ActualLRP) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ActualLRP_Routable)(nil), + } +} + +func init() { + proto.RegisterEnum("models.ActualLRPNetInfo_PreferredAddress", ActualLRPNetInfo_PreferredAddress_name, ActualLRPNetInfo_PreferredAddress_value) + proto.RegisterEnum("models.ActualLRP_Presence", ActualLRP_Presence_name, ActualLRP_Presence_value) + proto.RegisterType((*ActualLRPGroup)(nil), "models.ActualLRPGroup") + proto.RegisterType((*PortMapping)(nil), "models.PortMapping") + proto.RegisterType((*ActualLRPKey)(nil), "models.ActualLRPKey") + proto.RegisterType((*ActualLRPInstanceKey)(nil), "models.ActualLRPInstanceKey") + proto.RegisterType((*ActualLRPNetInfo)(nil), "models.ActualLRPNetInfo") + proto.RegisterType((*ActualLRPInternalRoute)(nil), "models.ActualLRPInternalRoute") + proto.RegisterType((*ActualLRP)(nil), "models.ActualLRP") + proto.RegisterMapType((map[string]string)(nil), "models.ActualLRP.MetricTagsEntry") +} + +func init() { proto.RegisterFile("actual_lrp.proto", fileDescriptor_25e5e77bfca46c1a) } + +var fileDescriptor_25e5e77bfca46c1a = []byte{ + // 1187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x56, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x16, 0xe5, 0xd8, 0x96, 0x46, 0xb2, 0x4c, 0xaf, 0x9d, 0x98, 0xd0, 0x1b, 0x90, 0x8a, 0xf0, + 0x16, 0x75, 0x02, 0xc4, 0x69, 0x9d, 0xa0, 0x68, 0x03, 0xf4, 0x60, 0x3a, 0x6e, 0x6c, 0x24, 0x91, + 0x8d, 0xb5, 0xdd, 0xa2, 0x1f, 0x80, 0xba, 0xa6, 0xd6, 0x0a, 0x11, 0x8a, 0x4b, 0x2c, 0x57, 0x6e, + 0xd4, 0x53, 0x8f, 0x85, 0xd1, 0x43, 0x81, 0x5e, 0x7a, 0xf1, 0xbd, 0xbf, 0xa1, 0xbf, 0x20, 0x47, + 0x1f, 0x73, 0x22, 
0x1a, 0xe5, 0x52, 0xf0, 0x94, 0x9f, 0x50, 0xec, 0xf2, 0x23, 0xb4, 0x14, 0x9f, + 0x76, 0xf7, 0xd9, 0x99, 0x67, 0x86, 0x33, 0xcf, 0x8e, 0x04, 0x3a, 0x71, 0xc4, 0x90, 0x78, 0x5d, + 0x8f, 0x07, 0xeb, 0x01, 0x67, 0x82, 0xa1, 0xb9, 0x01, 0xeb, 0x51, 0x2f, 0x6c, 0xde, 0xed, 0xbb, + 0xe2, 0xf9, 0xf0, 0x78, 0xdd, 0x61, 0x83, 0x7b, 0x7d, 0xd6, 0x67, 0xf7, 0xd4, 0xf5, 0xf1, 0xf0, + 0x44, 0x9d, 0xd4, 0x41, 0xed, 0x12, 0xb7, 0xe6, 0x8d, 0x01, 0xeb, 0xb9, 0x27, 0xae, 0x43, 0x84, + 0xcb, 0xfc, 0xae, 0x20, 0xfd, 0x04, 0x6f, 0x9f, 0x42, 0x63, 0x53, 0x85, 0x78, 0x8a, 0xf7, 0x1f, + 0x73, 0x36, 0x0c, 0xd0, 0x5d, 0xa8, 0xb8, 0x7e, 0x28, 0x88, 0xef, 0x50, 0x43, 0x6b, 0x69, 0x6b, + 0xb5, 0x8d, 0xa5, 0xf5, 0x24, 0xe6, 0x7a, 0x6e, 0x89, 0x73, 0x13, 0xf4, 0x29, 0x00, 0x3d, 0x25, + 0xce, 0x90, 0x08, 0xd7, 0xef, 0x1b, 0xe5, 0xab, 0x1c, 0x0a, 0x46, 0x0f, 0xcb, 0x86, 0xd6, 0xfe, + 0xa3, 0x0c, 0xb5, 0x7d, 0xc6, 0xc5, 0x33, 0x12, 0x04, 0xae, 0xdf, 0x47, 0x5f, 0x40, 0xc3, 0x61, + 0xbe, 0x20, 0xae, 0x4f, 0x79, 0x37, 0x60, 0x5c, 0xa8, 0xd8, 0x0b, 0x36, 0x8a, 0x23, 0x6b, 0xe2, + 0x06, 0x2f, 0xe4, 0x67, 0xc9, 0x80, 0xee, 0x40, 0xf5, 0x39, 0x0b, 0x45, 0xe2, 0x55, 0x56, 0x5e, + 0x0b, 0x71, 0x64, 0xbd, 0x07, 0x71, 0x45, 0x6e, 0x95, 0xed, 0x11, 0x18, 0xef, 0xc9, 0x84, 0x17, + 0x76, 0x03, 0xce, 0x5e, 0x8e, 0x12, 0xd7, 0x19, 0xe5, 0x7a, 0x33, 0x8e, 0xac, 0x2b, 0x6d, 0xf0, + 0xf5, 0xfc, 0xe6, 0xd0, 0x0b, 0xf7, 0x25, 0xae, 0x68, 0xbf, 0x82, 0x65, 0x15, 0x6d, 0x82, 0xf1, + 0x9a, 0x62, 0x5c, 0x8d, 0x23, 0xeb, 0x43, 0xd7, 0x58, 0x97, 0x60, 0x91, 0xa7, 0xfd, 0xab, 0x06, + 0xf5, 0xbc, 0x66, 0x4f, 0xe8, 0x08, 0xdd, 0x87, 0x7a, 0xc0, 0x99, 0x43, 0xc3, 0xb0, 0xdb, 0x1f, + 0xba, 0x3d, 0x55, 0x94, 0xaa, 0xad, 0xc7, 0x91, 0x75, 0x09, 0xc7, 0xb5, 0xf4, 0xf4, 0x78, 0xe8, + 0xf6, 0x90, 0x05, 0xb3, 0xae, 0xdf, 0xa3, 0x2f, 0x55, 0x31, 0x66, 0xed, 0x6a, 0x1c, 0x59, 0x09, + 0x80, 0x93, 0x05, 0xb5, 0x61, 0xae, 0xc7, 0x06, 0xc4, 0xf5, 0xd5, 0x37, 0x57, 0x6d, 0x88, 0x23, + 0x2b, 0x45, 0x70, 0xba, 0xb6, 0x05, 0xac, 0xe4, 0x99, 
0xec, 0xa6, 0xcd, 0x96, 0x19, 0x7d, 0x06, + 0x0b, 0x59, 0xef, 0x8b, 0x29, 0x2d, 0xc5, 0x91, 0x75, 0xf9, 0x02, 0xd7, 0xb3, 0xa3, 0x4a, 0xea, + 0xff, 0x30, 0xef, 0x50, 0xcf, 0xeb, 0xba, 0x3d, 0x95, 0x56, 0xd5, 0xae, 0xc5, 0x91, 0x95, 0x41, + 0x78, 0x4e, 0x6e, 0x76, 0x7b, 0xed, 0x3f, 0x67, 0x40, 0xcf, 0xc3, 0x76, 0xa8, 0xd8, 0xf5, 0x4f, + 0x18, 0xfa, 0x08, 0xe6, 0x49, 0xaf, 0xc7, 0x69, 0x18, 0xa6, 0xc1, 0x94, 0x6b, 0x0a, 0xe1, 0x6c, + 0x83, 0x1e, 0xc0, 0xac, 0x2c, 0x6b, 0x68, 0x94, 0x5b, 0x33, 0x6b, 0xb5, 0x8d, 0xe5, 0x4c, 0x84, + 0x05, 0x99, 0x25, 0xb5, 0x50, 0x56, 0x38, 0x59, 0xd0, 0x6d, 0xd0, 0xf3, 0xb4, 0xb3, 0x28, 0xaa, + 0x2a, 0x78, 0x31, 0xc3, 0x37, 0xd3, 0x00, 0x03, 0x58, 0x0a, 0x38, 0x3d, 0xa1, 0x9c, 0xd3, 0x5e, + 0x6e, 0x2b, 0x7b, 0xdc, 0xd8, 0xb8, 0x3d, 0xa5, 0xf8, 0x34, 0xf9, 0xf5, 0xfd, 0xcc, 0x23, 0x65, + 0xb1, 0xaf, 0xc7, 0x91, 0x35, 0xcd, 0x83, 0xf5, 0x60, 0xc2, 0xb0, 0xfd, 0x9b, 0x06, 0xfa, 0xa4, + 0x37, 0x5a, 0x83, 0xf9, 0xa3, 0xce, 0x93, 0xce, 0xde, 0x37, 0x1d, 0xbd, 0xd4, 0xfc, 0xdf, 0xd9, + 0x79, 0x6b, 0x75, 0xd2, 0xe4, 0xc8, 0x7f, 0xe1, 0xb3, 0x9f, 0x7c, 0x74, 0x07, 0x2a, 0xbb, 0x9d, + 0x83, 0xc3, 0xcd, 0xce, 0xd6, 0xb6, 0xae, 0x35, 0x6f, 0x9e, 0x9d, 0xb7, 0x8c, 0x49, 0xd3, 0xac, + 0xaf, 0xa8, 0x0d, 0xd7, 0x76, 0xf6, 0x0e, 0x0e, 0xf5, 0x72, 0xd3, 0x38, 0x3b, 0x6f, 0xad, 0x4c, + 0xda, 0xed, 0xb0, 0x50, 0xb4, 0x6d, 0xb8, 0x51, 0x10, 0x84, 0xa0, 0xdc, 0x27, 0x1e, 0x66, 0x43, + 0x41, 0xd1, 0x1a, 0xa8, 0x07, 0xe6, 0x93, 0x01, 0x4d, 0x1b, 0x54, 0x8f, 0x23, 0x2b, 0xc7, 0x70, + 0xbe, 0x6b, 0xff, 0x5d, 0x81, 0x6a, 0x4e, 0x82, 0x76, 0xa0, 0xf1, 0x7e, 0xbc, 0x75, 0x5f, 0xd0, + 0x51, 0x3a, 0x6f, 0x56, 0xa6, 0x8a, 0xf9, 0x84, 0x8e, 0xec, 0xfa, 0xab, 0xc8, 0x2a, 0x5d, 0x44, + 0x96, 0x16, 0x47, 0x56, 0x09, 0xd7, 0x13, 0xcf, 0xa7, 0x3c, 0x90, 0xa2, 0x24, 0xb0, 0x5a, 0x60, + 0xca, 0xfb, 0x29, 0x29, 0x93, 0x89, 0x74, 0x73, 0x8a, 0xb2, 0xa0, 0xe9, 0x09, 0xea, 0x95, 0x9c, + 0xba, 0xa8, 0xfb, 0x23, 0x58, 0x2e, 0x84, 0xf0, 0xa9, 0xe8, 0xba, 0xfe, 0x09, 0x53, 0x52, 
0xa9, + 0x6d, 0x18, 0x57, 0xb5, 0x7f, 0x82, 0x5a, 0xcf, 0xa9, 0x33, 0x6d, 0x7f, 0x02, 0x35, 0x87, 0x93, + 0xf0, 0x79, 0xd7, 0x61, 0x43, 0x3f, 0x99, 0x18, 0xb3, 0xf6, 0x62, 0x1c, 0x59, 0x45, 0x18, 0x83, + 0x3a, 0x6c, 0xc9, 0x3d, 0xba, 0x05, 0xf5, 0xe4, 0x8a, 0x53, 0x12, 0x32, 0xdf, 0x98, 0x55, 0x62, + 0x4d, 0xcc, 0xb1, 0x82, 0xe4, 0x00, 0x08, 0x05, 0x11, 0xd4, 0x98, 0x53, 0xdd, 0x50, 0xa2, 0x57, + 0x00, 0x4e, 0x16, 0xf4, 0x31, 0x2c, 0x06, 0x1e, 0x71, 0xe8, 0x80, 0xfa, 0xa2, 0x4b, 0x39, 0x67, + 0xdc, 0x98, 0x57, 0x34, 0x8d, 0x1c, 0xde, 0x96, 0xa8, 0x62, 0x72, 0xe5, 0x2f, 0x41, 0xa5, 0xa5, + 0xad, 0xcd, 0xa4, 0x4c, 0x12, 0xc0, 0xc9, 0x82, 0x7e, 0x00, 0x7d, 0xf2, 0x97, 0xc5, 0xa8, 0xaa, + 0x9a, 0xac, 0x66, 0x35, 0x79, 0x56, 0xb8, 0x3f, 0x24, 0x7d, 0xdb, 0x90, 0x25, 0x89, 0x23, 0x6b, + 0xca, 0x11, 0x2f, 0x0e, 0x2e, 0x9b, 0xa2, 0x47, 0x50, 0x09, 0x38, 0x0d, 0xa9, 0xcc, 0x00, 0xd4, + 0x43, 0x6b, 0x4e, 0x55, 0x5a, 0xbe, 0x30, 0x65, 0x91, 0xa8, 0x2e, 0xb3, 0xc7, 0xf9, 0x0e, 0x7d, + 0x0f, 0xcd, 0x4b, 0xea, 0x48, 0xb4, 0xdb, 0xe5, 0x52, 0xbc, 0xa1, 0x51, 0x53, 0xd3, 0xc2, 0xfc, + 0x80, 0x40, 0x0a, 0x1a, 0xc7, 0xab, 0x05, 0x51, 0x14, 0xf0, 0x10, 0xd9, 0x50, 0x1b, 0x50, 0xc1, + 0x5d, 0x47, 0x7e, 0x41, 0x68, 0xd4, 0x15, 0xdb, 0xad, 0xe9, 0x2c, 0x9f, 0x29, 0xa3, 0x43, 0xd2, + 0x0f, 0xb7, 0x7d, 0xc1, 0x47, 0x18, 0x06, 0x39, 0x20, 0x9f, 0xaa, 0x4c, 0x86, 0x1c, 0x7b, 0xd4, + 0x58, 0x68, 0x69, 0x6b, 0x95, 0xe4, 0x53, 0x32, 0x6c, 0xa7, 0x84, 0xf3, 0x3d, 0xb2, 0x61, 0x89, + 0x9c, 0x12, 0xd7, 0x23, 0xc7, 0xae, 0xe7, 0x8a, 0x51, 0xf7, 0x67, 0xe6, 0x53, 0xa3, 0xa1, 0xfa, + 0xac, 0x26, 0xcb, 0xd4, 0x25, 0xd6, 0x8b, 0xd0, 0x77, 0xcc, 0xa7, 0xcd, 0x2f, 0x61, 0x71, 0x22, + 0x1d, 0xa4, 0xc3, 0x4c, 0xf6, 0x00, 0xab, 0x58, 0x6e, 0xd1, 0x0a, 0xcc, 0x9e, 0x12, 0x6f, 0x48, + 0x93, 0x71, 0x8d, 0x93, 0xc3, 0xc3, 0xf2, 0xe7, 0x5a, 0xfb, 0x47, 0xa8, 0x64, 0x35, 0x47, 0x4d, + 0xa8, 0xec, 0xe1, 0x47, 0xbb, 0x9d, 0x4d, 0xfc, 0xad, 0x5e, 0x6a, 0xd6, 0xcf, 0xce, 0x5b, 0x95, + 0x3d, 0xde, 0x73, 0x7d, 0xc2, 
0x47, 0xc8, 0x04, 0xd8, 0xfe, 0x7a, 0x73, 0xeb, 0x68, 0xf3, 0x70, + 0xb7, 0xf3, 0x58, 0xd7, 0x9a, 0x8d, 0xb3, 0xf3, 0x16, 0x6c, 0xe7, 0xff, 0x03, 0x90, 0x01, 0xf3, + 0x07, 0x47, 0x07, 0xfb, 0xdb, 0x5b, 0x72, 0xf0, 0xd4, 0xce, 0xce, 0x5b, 0xf3, 0x07, 0xc3, 0x30, + 0xa0, 0x8e, 0xb0, 0x97, 0x61, 0x89, 0x05, 0x52, 0x04, 0x69, 0x9b, 0xe4, 0x97, 0xdb, 0x0f, 0x2e, + 0xde, 0x98, 0xda, 0xeb, 0x37, 0x66, 0xe9, 0xdd, 0x1b, 0x53, 0xfb, 0x65, 0x6c, 0x6a, 0x7f, 0x8d, + 0x4d, 0xed, 0xd5, 0xd8, 0xd4, 0x2e, 0xc6, 0xa6, 0xf6, 0xcf, 0xd8, 0xd4, 0xfe, 0x1d, 0x9b, 0xa5, + 0x77, 0x63, 0x53, 0xfb, 0xfd, 0xad, 0x59, 0xba, 0x78, 0x6b, 0x96, 0x5e, 0xbf, 0x35, 0x4b, 0xc7, + 0x73, 0xea, 0x7f, 0xce, 0xfd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xf8, 0x7a, 0x0a, 0x4a, + 0x09, 0x00, 0x00, +} + +func (x ActualLRPNetInfo_PreferredAddress) String() string { + s, ok := ActualLRPNetInfo_PreferredAddress_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ActualLRP_Presence) String() string { + s, ok := ActualLRP_Presence_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *ActualLRPGroup) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroup) + if !ok { + that2, ok := that.(ActualLRPGroup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Instance.Equal(that1.Instance) { + return false + } + if !this.Evacuating.Equal(that1.Evacuating) { + return false + } + return true +} +func (this *PortMapping) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PortMapping) + if !ok { + that2, ok := that.(PortMapping) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ContainerPort != that1.ContainerPort { + return false + } + if 
this.HostPort != that1.HostPort { + return false + } + if this.ContainerTlsProxyPort != that1.ContainerTlsProxyPort { + return false + } + if this.HostTlsProxyPort != that1.HostTlsProxyPort { + return false + } + return true +} +func (this *ActualLRPKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPKey) + if !ok { + that2, ok := that.(ActualLRPKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + if this.Domain != that1.Domain { + return false + } + return true +} +func (this *ActualLRPInstanceKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInstanceKey) + if !ok { + that2, ok := that.(ActualLRPInstanceKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.InstanceGuid != that1.InstanceGuid { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *ActualLRPNetInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPNetInfo) + if !ok { + that2, ok := that.(ActualLRPNetInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Address != that1.Address { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if !this.Ports[i].Equal(that1.Ports[i]) { + return false + } + } + if this.InstanceAddress != that1.InstanceAddress { + return false + } + if this.PreferredAddress != that1.PreferredAddress { + return false + } + return true +} +func (this 
*ActualLRPInternalRoute) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInternalRoute) + if !ok { + that2, ok := that.(ActualLRPInternalRoute) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Hostname != that1.Hostname { + return false + } + return true +} +func (this *ActualLRP) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRP) + if !ok { + that2, ok := that.(ActualLRP) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPKey.Equal(&that1.ActualLRPKey) { + return false + } + if !this.ActualLRPInstanceKey.Equal(&that1.ActualLRPInstanceKey) { + return false + } + if !this.ActualLRPNetInfo.Equal(&that1.ActualLRPNetInfo) { + return false + } + if this.CrashCount != that1.CrashCount { + return false + } + if this.CrashReason != that1.CrashReason { + return false + } + if this.State != that1.State { + return false + } + if this.PlacementError != that1.PlacementError { + return false + } + if this.Since != that1.Since { + return false + } + if !this.ModificationTag.Equal(&that1.ModificationTag) { + return false + } + if this.Presence != that1.Presence { + return false + } + if len(this.ActualLrpInternalRoutes) != len(that1.ActualLrpInternalRoutes) { + return false + } + for i := range this.ActualLrpInternalRoutes { + if !this.ActualLrpInternalRoutes[i].Equal(that1.ActualLrpInternalRoutes[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if this.MetricTags[i] != that1.MetricTags[i] { + return false + } + } + if that1.OptionalRoutable == nil { + if this.OptionalRoutable != nil { + return false + } + } else if this.OptionalRoutable == nil { + return 
false + } else if !this.OptionalRoutable.Equal(that1.OptionalRoutable) { + return false + } + if this.AvailabilityZone != that1.AvailabilityZone { + return false + } + return true +} +func (this *ActualLRP_Routable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRP_Routable) + if !ok { + that2, ok := that.(ActualLRP_Routable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Routable != that1.Routable { + return false + } + return true +} +func (this *ActualLRPGroup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroup{") + if this.Instance != nil { + s = append(s, "Instance: "+fmt.Sprintf("%#v", this.Instance)+",\n") + } + if this.Evacuating != nil { + s = append(s, "Evacuating: "+fmt.Sprintf("%#v", this.Evacuating)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PortMapping) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.PortMapping{") + s = append(s, "ContainerPort: "+fmt.Sprintf("%#v", this.ContainerPort)+",\n") + s = append(s, "HostPort: "+fmt.Sprintf("%#v", this.HostPort)+",\n") + s = append(s, "ContainerTlsProxyPort: "+fmt.Sprintf("%#v", this.ContainerTlsProxyPort)+",\n") + s = append(s, "HostTlsProxyPort: "+fmt.Sprintf("%#v", this.HostTlsProxyPort)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPKey) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.ActualLRPKey{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*ActualLRPInstanceKey) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPInstanceKey{") + s = append(s, "InstanceGuid: "+fmt.Sprintf("%#v", this.InstanceGuid)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPNetInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.ActualLRPNetInfo{") + s = append(s, "Address: "+fmt.Sprintf("%#v", this.Address)+",\n") + if this.Ports != nil { + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + } + s = append(s, "InstanceAddress: "+fmt.Sprintf("%#v", this.InstanceAddress)+",\n") + s = append(s, "PreferredAddress: "+fmt.Sprintf("%#v", this.PreferredAddress)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInternalRoute) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPInternalRoute{") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRP) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 18) + s = append(s, "&models.ActualLRP{") + s = append(s, "ActualLRPKey: "+strings.Replace(this.ActualLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPInstanceKey: "+strings.Replace(this.ActualLRPInstanceKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPNetInfo: "+strings.Replace(this.ActualLRPNetInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CrashCount: "+fmt.Sprintf("%#v", this.CrashCount)+",\n") + s = append(s, "CrashReason: "+fmt.Sprintf("%#v", this.CrashReason)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "PlacementError: "+fmt.Sprintf("%#v", this.PlacementError)+",\n") + s = append(s, "Since: 
"+fmt.Sprintf("%#v", this.Since)+",\n") + s = append(s, "ModificationTag: "+strings.Replace(this.ModificationTag.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Presence: "+fmt.Sprintf("%#v", this.Presence)+",\n") + if this.ActualLrpInternalRoutes != nil { + s = append(s, "ActualLrpInternalRoutes: "+fmt.Sprintf("%#v", this.ActualLrpInternalRoutes)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRP_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ActualLRP_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func valueToGoStringActualLrp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ActualLRPGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ActualLRPGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Evacuating != nil { + { + size, err := m.Evacuating.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Instance != nil { + { + size, err := m.Instance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PortMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HostTlsProxyPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.HostTlsProxyPort)) + i-- + dAtA[i] = 0x20 + } + if m.ContainerTlsProxyPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.ContainerTlsProxyPort)) + i-- + dAtA[i] = 0x18 + } + if m.HostPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + } + if m.ContainerPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInstanceKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceGuid) > 0 { + i -= len(m.InstanceGuid) + copy(dAtA[i:], m.InstanceGuid) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.InstanceGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPNetInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPNetInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPNetInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreferredAddress != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.PreferredAddress)) + i-- + dAtA[i] = 0x20 + } + if 
len(m.InstanceAddress) > 0 { + i -= len(m.InstanceAddress) + copy(dAtA[i:], m.InstanceAddress) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.InstanceAddress))) + i-- + dAtA[i] = 0x1a + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInternalRoute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInternalRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInternalRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintActualLrp(dAtA, i, 
uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x72 + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintActualLrp(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintActualLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintActualLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ActualLrpInternalRoutes) > 0 { + for iNdEx := len(m.ActualLrpInternalRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpInternalRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.Presence != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.Presence)) + i-- + dAtA[i] = 0x50 + } + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if m.Since != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x40 + } + if len(m.PlacementError) > 0 { + i -= len(m.PlacementError) + copy(dAtA[i:], m.PlacementError) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.PlacementError))) + i-- + dAtA[i] = 0x3a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.CrashReason) > 0 { + i -= len(m.CrashReason) + copy(dAtA[i:], m.CrashReason) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.CrashReason))) + i-- + dAtA[i] = 0x2a + } + if m.CrashCount != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.CrashCount)) + i-- + 
dAtA[i] = 0x20 + } + { + size, err := m.ActualLRPNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.ActualLRPInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ActualLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ActualLRP_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRP_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + return len(dAtA) - i, nil +} +func encodeVarintActualLrp(dAtA []byte, offset int, v uint64) int { + offset -= sovActualLrp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ActualLRPGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Instance != nil { + l = m.Instance.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.Evacuating != nil { + l = m.Evacuating.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *PortMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ContainerPort != 0 { + n += 1 + sovActualLrp(uint64(m.ContainerPort)) + } + if m.HostPort != 0 { + n += 1 + sovActualLrp(uint64(m.HostPort)) + } + if m.ContainerTlsProxyPort != 0 { + n += 1 + sovActualLrp(uint64(m.ContainerTlsProxyPort)) + } + if m.HostTlsProxyPort != 0 { + n += 1 + sovActualLrp(uint64(m.HostTlsProxyPort)) + } + return n +} + +func (m *ActualLRPKey) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrp(uint64(m.Index)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRPInstanceKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceGuid) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRPNetInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + } + l = len(m.InstanceAddress) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.PreferredAddress != 0 { + n += 1 + sovActualLrp(uint64(m.PreferredAddress)) + } + return n +} + +func (m *ActualLRPInternalRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPKey.Size() + n += 1 + l + sovActualLrp(uint64(l)) + l = m.ActualLRPInstanceKey.Size() + n += 1 + l + sovActualLrp(uint64(l)) + l = m.ActualLRPNetInfo.Size() + n += 1 + l + sovActualLrp(uint64(l)) + if m.CrashCount != 0 { + n += 1 + sovActualLrp(uint64(m.CrashCount)) + } + l = len(m.CrashReason) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + l = len(m.PlacementError) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.Since != 0 { + n += 1 + sovActualLrp(uint64(m.Since)) + } + l = m.ModificationTag.Size() + n += 1 + l + 
sovActualLrp(uint64(l)) + if m.Presence != 0 { + n += 1 + sovActualLrp(uint64(m.Presence)) + } + if len(m.ActualLrpInternalRoutes) > 0 { + for _, e := range m.ActualLrpInternalRoutes { + l = e.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovActualLrp(uint64(len(k))) + 1 + len(v) + sovActualLrp(uint64(len(v))) + n += mapEntrySize + 1 + sovActualLrp(uint64(mapEntrySize)) + } + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRP_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func sovActualLrp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozActualLrp(x uint64) (n int) { + return sovActualLrp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ActualLRPGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroup{`, + `Instance:` + strings.Replace(this.Instance.String(), "ActualLRP", "ActualLRP", 1) + `,`, + `Evacuating:` + strings.Replace(this.Evacuating.String(), "ActualLRP", "ActualLRP", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortMapping{`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerTlsProxyPort:` + fmt.Sprintf("%v", this.ContainerTlsProxyPort) + `,`, + `HostTlsProxyPort:` + fmt.Sprintf("%v", this.HostTlsProxyPort) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPKey{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", 
this.Index) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceKey{`, + `InstanceGuid:` + fmt.Sprintf("%v", this.InstanceGuid) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPNetInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]*PortMapping{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(f.String(), "PortMapping", "PortMapping", 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&ActualLRPNetInfo{`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `InstanceAddress:` + fmt.Sprintf("%v", this.InstanceAddress) + `,`, + `PreferredAddress:` + fmt.Sprintf("%v", this.PreferredAddress) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInternalRoute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInternalRoute{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRP) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpInternalRoutes := "[]*ActualLRPInternalRoute{" + for _, f := range this.ActualLrpInternalRoutes { + repeatedStringForActualLrpInternalRoutes += strings.Replace(f.String(), "ActualLRPInternalRoute", "ActualLRPInternalRoute", 1) + "," + } + repeatedStringForActualLrpInternalRoutes += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", 
k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&ActualLRP{`, + `ActualLRPKey:` + strings.Replace(strings.Replace(this.ActualLRPKey.String(), "ActualLRPKey", "ActualLRPKey", 1), `&`, ``, 1) + `,`, + `ActualLRPInstanceKey:` + strings.Replace(strings.Replace(this.ActualLRPInstanceKey.String(), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1), `&`, ``, 1) + `,`, + `ActualLRPNetInfo:` + strings.Replace(strings.Replace(this.ActualLRPNetInfo.String(), "ActualLRPNetInfo", "ActualLRPNetInfo", 1), `&`, ``, 1) + `,`, + `CrashCount:` + fmt.Sprintf("%v", this.CrashCount) + `,`, + `CrashReason:` + fmt.Sprintf("%v", this.CrashReason) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `PlacementError:` + fmt.Sprintf("%v", this.PlacementError) + `,`, + `Since:` + fmt.Sprintf("%v", this.Since) + `,`, + `ModificationTag:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1), `&`, ``, 1) + `,`, + `Presence:` + fmt.Sprintf("%v", this.Presence) + `,`, + `ActualLrpInternalRoutes:` + repeatedStringForActualLrpInternalRoutes + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRP_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRP_Routable{`, + `Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func valueToStringActualLrp(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ActualLRPGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Instance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Instance == nil { + m.Instance = &ActualLRP{} + } + if err := m.Instance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evacuating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Evacuating == nil { + m.Evacuating = &ActualLRP{} + } + if err := m.Evacuating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err 
:= skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTlsProxyPort", wireType) + } + m.ContainerTlsProxyPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerTlsProxyPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostTlsProxyPort", wireType) + } + m.HostTlsProxyPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostTlsProxyPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstanceGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPNetInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPNetInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPNetInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortMapping{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstanceAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredAddress", wireType) + } + m.PreferredAddress = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreferredAddress |= ActualLRPNetInfo_PreferredAddress(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInternalRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInternalRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInternalRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRP: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashCount", wireType) + } + m.CrashCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CrashCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrashReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Presence", wireType) + } + m.Presence = 0 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Presence |= ActualLRP_Presence(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInternalRoutes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpInternalRoutes = append(m.ActualLrpInternalRoutes, &ActualLRPInternalRoute{}) + if err := m.ActualLrpInternalRoutes[len(m.ActualLrpInternalRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthActualLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthActualLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthActualLrp + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthActualLrp + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = &ActualLRP_Routable{b} + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipActualLrp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + 
iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthActualLrp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupActualLrp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthActualLrp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthActualLrp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowActualLrp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupActualLrp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto new file mode 100644 index 00000000..aa089c96 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "modification_tag.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message ActualLRPGroup { + option deprecated = true; + ActualLRP instance = 1; + ActualLRP evacuating = 2; +} + +message PortMapping { + uint32 container_port = 1 [(gogoproto.jsontag) = "container_port"]; + uint32 host_port = 2 [(gogoproto.jsontag) = "host_port"]; + uint32 container_tls_proxy_port = 3 [(gogoproto.jsontag) = "container_tls_proxy_port"]; + uint32 host_tls_proxy_port = 4 [(gogoproto.jsontag) = "host_tls_proxy_port"]; +} + +message ActualLRPKey { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = 
"index"]; + string domain = 3 [(gogoproto.jsontag) = "domain"]; +} + +message ActualLRPInstanceKey { + string instance_guid = 1 [(gogoproto.jsontag) = "instance_guid"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message ActualLRPNetInfo { + string address = 1 [(gogoproto.jsontag) = "address"]; + repeated PortMapping ports = 2 [(gogoproto.jsontag) = "ports"]; + string instance_address = 3; + + enum PreferredAddress { + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "PreferredAddressUnknown"]; + INSTANCE = 1 [(gogoproto.enumvalue_customname) = "PreferredAddressInstance"]; + HOST = 2 [(gogoproto.enumvalue_customname) = "PreferredAddressHost"]; + } + + PreferredAddress preferred_address = 4 [(gogoproto.jsontag) = "preferred_address"]; +} + +message ActualLRPInternalRoute { + string hostname = 1 [(gogoproto.jsontag) = "hostname"]; +} + +message ActualLRP { + enum Presence { + ORDINARY = 0 [(gogoproto.enumvalue_customname) = "Ordinary"]; + EVACUATING = 1 [(gogoproto.enumvalue_customname) = "Evacuating"]; + SUSPECT = 2 [(gogoproto.enumvalue_customname) = "Suspect"]; + } + + ActualLRPKey actual_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInstanceKey actual_lrp_instance_key = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPNetInfo actual_lrp_net_info = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + int32 crash_count = 4 [(gogoproto.jsontag) = "crash_count"]; + string crash_reason = 5; + string state = 6 [(gogoproto.jsontag) = "state"]; + string placement_error = 7; + int64 since = 8 [(gogoproto.jsontag) = "since"]; + ModificationTag modification_tag = 9 [(gogoproto.nullable) = false,(gogoproto.jsontag) = "modification_tag"]; + Presence presence = 10 [(gogoproto.jsontag) = "presence"]; + repeated ActualLRPInternalRoute actual_lrp_internal_routes = 11; + map metric_tags = 12; + oneof optional_routable { + 
bool routable = 13 [(gogoproto.jsontag) = "routable"]; + } + string availability_zone = 14 [(gogoproto.jsontag) = "availability_zone"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go new file mode 100644 index 00000000..96268193 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go @@ -0,0 +1,346 @@ +package models + +import "encoding/json" + +func (request *ActualLRPsRequest) Validate() error { + return nil +} + +func (request *ActualLRPsRequest) SetIndex(index int32) { + request.OptionalIndex = &ActualLRPsRequest_Index{Index: index} +} + +func (request ActualLRPsRequest) IndexExists() bool { + _, ok := request.GetOptionalIndex().(*ActualLRPsRequest_Index) + return ok +} + +type internalActualLRPsRequest struct { + Domain string `json:"domain"` + CellId string `json:"cell_id"` + ProcessGuid string `json:"process_guid"` + Index *int32 `json:"index,omitempty"` +} + +func (request *ActualLRPsRequest) UnmarshalJSON(data []byte) error { + var internalRequest internalActualLRPsRequest + if err := json.Unmarshal(data, &internalRequest); err != nil { + return err + } + + request.Domain = internalRequest.Domain + request.CellId = internalRequest.CellId + request.ProcessGuid = internalRequest.ProcessGuid + if internalRequest.Index != nil { + request.SetIndex(*internalRequest.Index) + } + + return nil +} + +func (request ActualLRPsRequest) MarshalJSON() ([]byte, error) { + internalRequest := internalActualLRPsRequest{ + Domain: request.Domain, + CellId: request.CellId, + ProcessGuid: request.ProcessGuid, + } + + if request.IndexExists() { + i := request.GetIndex() + internalRequest.Index = &i + } + return json.Marshal(internalRequest) +} + +// Deprecated: use the ActualLRPInstances API instead +func (request *ActualLRPGroupsRequest) Validate() error { + return nil +} + +// Deprecated: use the ActualLRPInstances API instead +func (request 
*ActualLRPGroupsByProcessGuidRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +// Deprecated: use the ActualLRPInstances API instead +func (request *ActualLRPGroupByProcessGuidAndIndexRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.Index < 0 { + validationError = validationError.Append(ErrInvalidField{"index"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RemoveActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.Index < 0 { + validationError = validationError.Append(ErrInvalidField{"index"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *ClaimActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *StartActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + 
validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpNetInfo == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_net_info"}) + } else if err := request.ActualLrpNetInfo.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *StartActualLRPRequest) SetRoutable(routable bool) { + request.OptionalRoutable = &StartActualLRPRequest_Routable{ + Routable: routable, + } +} + +func (request *StartActualLRPRequest) RoutableExists() bool { + _, ok := request.GetOptionalRoutable().(*StartActualLRPRequest_Routable) + return ok +} + +func (request *CrashActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *FailActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ErrorMessage == "" { + validationError 
= validationError.Append(ErrInvalidField{"error_message"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RetireActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RemoveEvacuatingActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateClaimedActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateCrashedActualLRPRequest) Validate() error { + var 
validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ErrorMessage == "" { + validationError = validationError.Append(ErrInvalidField{"error_message"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateStoppedActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateRunningActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + 
if request.ActualLrpNetInfo == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_net_info"}) + } else if err := request.ActualLrpNetInfo.Validate(); err != nil { + validationError = validationError.Append(err) + } + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go new file mode 100644 index 00000000..f34f6715 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go @@ -0,0 +1,4872 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: actual_lrp_requests.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ActualLRPLifecycleResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *ActualLRPLifecycleResponse) Reset() { *m = ActualLRPLifecycleResponse{} } +func (*ActualLRPLifecycleResponse) ProtoMessage() {} +func (*ActualLRPLifecycleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{0} +} +func (m *ActualLRPLifecycleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPLifecycleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPLifecycleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPLifecycleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPLifecycleResponse.Merge(m, src) +} +func (m *ActualLRPLifecycleResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPLifecycleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPLifecycleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPLifecycleResponse proto.InternalMessageInfo + +func (m *ActualLRPLifecycleResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +// Deprecated: Do not use. 
+type ActualLRPGroupsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ActualLrpGroups []*ActualLRPGroup `protobuf:"bytes,2,rep,name=actual_lrp_groups,json=actualLrpGroups,proto3" json:"actual_lrp_groups,omitempty"` +} + +func (m *ActualLRPGroupsResponse) Reset() { *m = ActualLRPGroupsResponse{} } +func (*ActualLRPGroupsResponse) ProtoMessage() {} +func (*ActualLRPGroupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{1} +} +func (m *ActualLRPGroupsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupsResponse.Merge(m, src) +} +func (m *ActualLRPGroupsResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupsResponse proto.InternalMessageInfo + +func (m *ActualLRPGroupsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *ActualLRPGroupsResponse) GetActualLrpGroups() []*ActualLRPGroup { + if m != nil { + return m.ActualLrpGroups + } + return nil +} + +// Deprecated: Do not use. 
+type ActualLRPGroupResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ActualLrpGroup *ActualLRPGroup `protobuf:"bytes,2,opt,name=actual_lrp_group,json=actualLrpGroup,proto3" json:"actual_lrp_group,omitempty"` +} + +func (m *ActualLRPGroupResponse) Reset() { *m = ActualLRPGroupResponse{} } +func (*ActualLRPGroupResponse) ProtoMessage() {} +func (*ActualLRPGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{2} +} +func (m *ActualLRPGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupResponse.Merge(m, src) +} +func (m *ActualLRPGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupResponse proto.InternalMessageInfo + +func (m *ActualLRPGroupResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *ActualLRPGroupResponse) GetActualLrpGroup() *ActualLRPGroup { + if m != nil { + return m.ActualLrpGroup + } + return nil +} + +// Deprecated: Do not use. 
+type ActualLRPGroupsRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *ActualLRPGroupsRequest) Reset() { *m = ActualLRPGroupsRequest{} } +func (*ActualLRPGroupsRequest) ProtoMessage() {} +func (*ActualLRPGroupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{3} +} +func (m *ActualLRPGroupsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupsRequest.Merge(m, src) +} +func (m *ActualLRPGroupsRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupsRequest proto.InternalMessageInfo + +func (m *ActualLRPGroupsRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *ActualLRPGroupsRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +// Deprecated: Do not use. 
+type ActualLRPGroupsByProcessGuidRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` +} + +func (m *ActualLRPGroupsByProcessGuidRequest) Reset() { *m = ActualLRPGroupsByProcessGuidRequest{} } +func (*ActualLRPGroupsByProcessGuidRequest) ProtoMessage() {} +func (*ActualLRPGroupsByProcessGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{4} +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest.Merge(m, src) +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest proto.InternalMessageInfo + +func (m *ActualLRPGroupsByProcessGuidRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +// Deprecated: Do not use. 
+type ActualLRPGroupByProcessGuidAndIndexRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Reset() { + *m = ActualLRPGroupByProcessGuidAndIndexRequest{} +} +func (*ActualLRPGroupByProcessGuidAndIndexRequest) ProtoMessage() {} +func (*ActualLRPGroupByProcessGuidAndIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{5} +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest.Merge(m, src) +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest proto.InternalMessageInfo + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +type ClaimActualLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 
`protobuf:"varint,2,opt,name=index,proto3" json:"index"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,3,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *ClaimActualLRPRequest) Reset() { *m = ClaimActualLRPRequest{} } +func (*ClaimActualLRPRequest) ProtoMessage() {} +func (*ClaimActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{6} +} +func (m *ClaimActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClaimActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClaimActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClaimActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClaimActualLRPRequest.Merge(m, src) +} +func (m *ClaimActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *ClaimActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClaimActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClaimActualLRPRequest proto.InternalMessageInfo + +func (m *ClaimActualLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ClaimActualLRPRequest) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ClaimActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type StartActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + 
ActualLrpNetInfo *ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3" json:"actual_lrp_net_info,omitempty"` + ActualLrpInternalRoutes []*ActualLRPInternalRoute `protobuf:"bytes,4,rep,name=actual_lrp_internal_routes,json=actualLrpInternalRoutes,proto3" json:"actual_lrp_internal_routes,omitempty"` + MetricTags map[string]string `protobuf:"bytes,5,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to OptionalRoutable: + // *StartActualLRPRequest_Routable + OptionalRoutable isStartActualLRPRequest_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,7,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *StartActualLRPRequest) Reset() { *m = StartActualLRPRequest{} } +func (*StartActualLRPRequest) ProtoMessage() {} +func (*StartActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{7} +} +func (m *StartActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartActualLRPRequest.Merge(m, src) +} +func (m *StartActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *StartActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartActualLRPRequest proto.InternalMessageInfo + +type isStartActualLRPRequest_OptionalRoutable interface { + 
isStartActualLRPRequest_OptionalRoutable() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type StartActualLRPRequest_Routable struct { + Routable bool `protobuf:"varint,6,opt,name=Routable,proto3,oneof" json:"Routable,omitempty"` +} + +func (*StartActualLRPRequest_Routable) isStartActualLRPRequest_OptionalRoutable() {} + +func (m *StartActualLRPRequest) GetOptionalRoutable() isStartActualLRPRequest_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpNetInfo() *ActualLRPNetInfo { + if m != nil { + return m.ActualLrpNetInfo + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpInternalRoutes() []*ActualLRPInternalRoute { + if m != nil { + return m.ActualLrpInternalRoutes + } + return nil +} + +func (m *StartActualLRPRequest) GetMetricTags() map[string]string { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *StartActualLRPRequest) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*StartActualLRPRequest_Routable); ok { + return x.Routable + } + return false +} + +func (m *StartActualLRPRequest) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*StartActualLRPRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*StartActualLRPRequest_Routable)(nil), + } +} + +type CrashActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message"` +} + +func (m *CrashActualLRPRequest) Reset() { *m = CrashActualLRPRequest{} } +func (*CrashActualLRPRequest) ProtoMessage() {} +func (*CrashActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{8} +} +func (m *CrashActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrashActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CrashActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CrashActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrashActualLRPRequest.Merge(m, src) +} +func (m *CrashActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *CrashActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CrashActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CrashActualLRPRequest proto.InternalMessageInfo + +func (m *CrashActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *CrashActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *CrashActualLRPRequest) GetErrorMessage() string { + if m != nil { + 
return m.ErrorMessage + } + return "" +} + +type FailActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message"` +} + +func (m *FailActualLRPRequest) Reset() { *m = FailActualLRPRequest{} } +func (*FailActualLRPRequest) ProtoMessage() {} +func (*FailActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{9} +} +func (m *FailActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FailActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FailActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FailActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FailActualLRPRequest.Merge(m, src) +} +func (m *FailActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *FailActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FailActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FailActualLRPRequest proto.InternalMessageInfo + +func (m *FailActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *FailActualLRPRequest) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +type RetireActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` +} + +func (m *RetireActualLRPRequest) Reset() { *m = RetireActualLRPRequest{} } +func (*RetireActualLRPRequest) ProtoMessage() {} +func (*RetireActualLRPRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_a7753fd8557db809, []int{10} +} +func (m *RetireActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetireActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RetireActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RetireActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetireActualLRPRequest.Merge(m, src) +} +func (m *RetireActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RetireActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RetireActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RetireActualLRPRequest proto.InternalMessageInfo + +func (m *RetireActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +type RemoveActualLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,3,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *RemoveActualLRPRequest) Reset() { *m = RemoveActualLRPRequest{} } +func (*RemoveActualLRPRequest) ProtoMessage() {} +func (*RemoveActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{11} +} +func (m *RemoveActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return 
nil, err + } + return b[:n], nil + } +} +func (m *RemoveActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveActualLRPRequest.Merge(m, src) +} +func (m *RemoveActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveActualLRPRequest proto.InternalMessageInfo + +func (m *RemoveActualLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *RemoveActualLRPRequest) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *RemoveActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type ActualLRPsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ActualLrps []*ActualLRP `protobuf:"bytes,2,rep,name=actual_lrps,json=actualLrps,proto3" json:"actual_lrps,omitempty"` +} + +func (m *ActualLRPsResponse) Reset() { *m = ActualLRPsResponse{} } +func (*ActualLRPsResponse) ProtoMessage() {} +func (*ActualLRPsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{12} +} +func (m *ActualLRPsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPsResponse.Merge(m, src) +} +func (m *ActualLRPsResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPsResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ActualLRPsResponse proto.InternalMessageInfo + +func (m *ActualLRPsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *ActualLRPsResponse) GetActualLrps() []*ActualLRP { + if m != nil { + return m.ActualLrps + } + return nil +} + +type ActualLRPsRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + ProcessGuid string `protobuf:"bytes,3,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + // Types that are valid to be assigned to OptionalIndex: + // *ActualLRPsRequest_Index + OptionalIndex isActualLRPsRequest_OptionalIndex `protobuf_oneof:"optional_index"` +} + +func (m *ActualLRPsRequest) Reset() { *m = ActualLRPsRequest{} } +func (*ActualLRPsRequest) ProtoMessage() {} +func (*ActualLRPsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{13} +} +func (m *ActualLRPsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPsRequest.Merge(m, src) +} +func (m *ActualLRPsRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPsRequest proto.InternalMessageInfo + +type isActualLRPsRequest_OptionalIndex interface { + isActualLRPsRequest_OptionalIndex() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ActualLRPsRequest_Index struct { + Index int32 
`protobuf:"varint,4,opt,name=index,proto3,oneof" json:"index"` +} + +func (*ActualLRPsRequest_Index) isActualLRPsRequest_OptionalIndex() {} + +func (m *ActualLRPsRequest) GetOptionalIndex() isActualLRPsRequest_OptionalIndex { + if m != nil { + return m.OptionalIndex + } + return nil +} + +func (m *ActualLRPsRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *ActualLRPsRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *ActualLRPsRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ActualLRPsRequest) GetIndex() int32 { + if x, ok := m.GetOptionalIndex().(*ActualLRPsRequest_Index); ok { + return x.Index + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ActualLRPsRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ActualLRPsRequest_Index)(nil), + } +} + +func init() { + proto.RegisterType((*ActualLRPLifecycleResponse)(nil), "models.ActualLRPLifecycleResponse") + proto.RegisterType((*ActualLRPGroupsResponse)(nil), "models.ActualLRPGroupsResponse") + proto.RegisterType((*ActualLRPGroupResponse)(nil), "models.ActualLRPGroupResponse") + proto.RegisterType((*ActualLRPGroupsRequest)(nil), "models.ActualLRPGroupsRequest") + proto.RegisterType((*ActualLRPGroupsByProcessGuidRequest)(nil), "models.ActualLRPGroupsByProcessGuidRequest") + proto.RegisterType((*ActualLRPGroupByProcessGuidAndIndexRequest)(nil), "models.ActualLRPGroupByProcessGuidAndIndexRequest") + proto.RegisterType((*ClaimActualLRPRequest)(nil), "models.ClaimActualLRPRequest") + proto.RegisterType((*StartActualLRPRequest)(nil), "models.StartActualLRPRequest") + proto.RegisterMapType((map[string]string)(nil), "models.StartActualLRPRequest.MetricTagsEntry") + proto.RegisterType((*CrashActualLRPRequest)(nil), "models.CrashActualLRPRequest") + proto.RegisterType((*FailActualLRPRequest)(nil), 
"models.FailActualLRPRequest") + proto.RegisterType((*RetireActualLRPRequest)(nil), "models.RetireActualLRPRequest") + proto.RegisterType((*RemoveActualLRPRequest)(nil), "models.RemoveActualLRPRequest") + proto.RegisterType((*ActualLRPsResponse)(nil), "models.ActualLRPsResponse") + proto.RegisterType((*ActualLRPsRequest)(nil), "models.ActualLRPsRequest") +} + +func init() { proto.RegisterFile("actual_lrp_requests.proto", fileDescriptor_a7753fd8557db809) } + +var fileDescriptor_a7753fd8557db809 = []byte{ + // 851 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0x38, 0xb1, 0x5b, 0x3f, 0xa7, 0xa9, 0xbd, 0xcd, 0x8f, 0xc5, 0xaa, 0xd6, 0x61, 0xcb, + 0x21, 0x42, 0xaa, 0x2b, 0xa5, 0x08, 0xa1, 0x48, 0x48, 0xc4, 0xa8, 0xa4, 0x56, 0xd3, 0xaa, 0x9a, + 0xf6, 0x04, 0x12, 0xab, 0xb1, 0x3d, 0x76, 0x47, 0xec, 0xee, 0x98, 0x99, 0xd9, 0x08, 0x73, 0x42, + 0x42, 0xea, 0x81, 0x13, 0x7f, 0x06, 0x7f, 0x07, 0x1c, 0xe0, 0x98, 0x03, 0x87, 0x9e, 0xac, 0xc6, + 0xb9, 0x20, 0x9f, 0xfa, 0x27, 0xa0, 0x9d, 0xf1, 0x6e, 0xd6, 0xde, 0x82, 0x1a, 0x08, 0x12, 0x3d, + 0xed, 0xbc, 0x6f, 0xde, 0x7c, 0xdf, 0x37, 0x6f, 0xdf, 0xcc, 0x2e, 0xbc, 0x43, 0x7a, 0x2a, 0x22, + 0xbe, 0xe7, 0x8b, 0x91, 0x27, 0xe8, 0xd7, 0x11, 0x95, 0x4a, 0xb6, 0x46, 0x82, 0x2b, 0x6e, 0x95, + 0x03, 0xde, 0xa7, 0xbe, 0x6c, 0xdc, 0x1e, 0x32, 0xf5, 0x2c, 0xea, 0xb6, 0x7a, 0x3c, 0xb8, 0x33, + 0xe4, 0x43, 0x7e, 0x47, 0x4f, 0x77, 0xa3, 0x81, 0x8e, 0x74, 0xa0, 0x47, 0x66, 0x59, 0xa3, 0x76, + 0xce, 0x38, 0x47, 0xaa, 0x54, 0x08, 0x2e, 0x4c, 0xe0, 0x1e, 0x40, 0xe3, 0x40, 0x27, 0x1c, 0xe1, + 0xc7, 0x47, 0x6c, 0x40, 0x7b, 0xe3, 0x9e, 0x4f, 0x31, 0x95, 0x23, 0x1e, 0x4a, 0x6a, 0xdd, 0x82, + 0x92, 0x4e, 0xb6, 0xd1, 0x0e, 0xda, 0xad, 0xee, 0x5d, 0x6b, 0x19, 0x0f, 0xad, 0x7b, 0x31, 0x88, + 0xcd, 0x9c, 0xfb, 0x1c, 0xc1, 0x76, 0xca, 0x71, 0x28, 0x78, 0x34, 0x92, 0x17, 0x22, 0xb0, 0xda, + 0x50, 0xcf, 0x6c, 0x7b, 0xa8, 0x19, 0xec, 0xe2, 0xce, 0xca, 0x6e, 
0x75, 0x6f, 0x2b, 0x59, 0xb0, + 0x28, 0x80, 0xaf, 0x9b, 0x05, 0x47, 0x62, 0x64, 0x04, 0xf7, 0x8b, 0x36, 0x72, 0xbf, 0x47, 0xb0, + 0xb5, 0x94, 0x77, 0x21, 0x1f, 0x9f, 0x40, 0x6d, 0xd9, 0x87, 0x5d, 0xd4, 0xf9, 0x7f, 0x65, 0x63, + 0x7d, 0xd1, 0x86, 0x76, 0x31, 0x58, 0x36, 0x21, 0xb1, 0x79, 0x91, 0x96, 0x0b, 0xe5, 0x3e, 0x0f, + 0x08, 0x0b, 0xb5, 0x8b, 0x4a, 0x1b, 0x66, 0x93, 0xe6, 0x1c, 0xc1, 0xf3, 0xa7, 0xf5, 0x1e, 0x5c, + 0xe9, 0x51, 0xdf, 0xf7, 0x58, 0x5f, 0x4b, 0x57, 0xda, 0xd5, 0xd9, 0xa4, 0x99, 0x40, 0xb8, 0x1c, + 0x0f, 0x3a, 0x7d, 0xad, 0xf3, 0x25, 0xdc, 0x5a, 0xd2, 0x69, 0x8f, 0x1f, 0x0b, 0xde, 0xa3, 0x52, + 0x1e, 0x46, 0xac, 0x9f, 0x88, 0xde, 0x85, 0xb5, 0x91, 0x41, 0xbd, 0x61, 0xc4, 0xfa, 0x73, 0xe9, + 0xda, 0x6c, 0xd2, 0x5c, 0xc0, 0x71, 0x75, 0x74, 0xbe, 0x56, 0xf3, 0x3f, 0x47, 0xf0, 0xfe, 0xa2, + 0xc0, 0x02, 0xff, 0x41, 0xd8, 0xef, 0x84, 0x7d, 0xfa, 0xcd, 0xbf, 0xd1, 0xb1, 0x9a, 0x50, 0x62, + 0x31, 0x89, 0xde, 0x6b, 0xa9, 0x5d, 0x99, 0x4d, 0x9a, 0x06, 0xc0, 0xe6, 0xa1, 0x8d, 0xfc, 0x8c, + 0x60, 0xf3, 0x53, 0x9f, 0xb0, 0x20, 0x75, 0xf3, 0x9f, 0x6a, 0x5a, 0x4f, 0x60, 0x3b, 0xd3, 0x06, + 0x2c, 0x94, 0x8a, 0x84, 0x3d, 0xea, 0x7d, 0x45, 0xc7, 0xf6, 0x8a, 0xee, 0x86, 0x9b, 0xb9, 0x6e, + 0xe8, 0xcc, 0x93, 0x1e, 0xd0, 0x31, 0xde, 0x48, 0x7b, 0x22, 0x83, 0xba, 0xbf, 0xaf, 0xc2, 0xe6, + 0x13, 0x45, 0x84, 0xca, 0x6d, 0x62, 0x1f, 0xd6, 0x33, 0x72, 0xb1, 0x8a, 0xe9, 0xd1, 0x8d, 0x9c, + 0x4a, 0xcc, 0xbe, 0x96, 0xb2, 0x3f, 0xa0, 0xe3, 0xbf, 0xb3, 0x5a, 0xfc, 0xa7, 0x56, 0xad, 0x43, + 0xb8, 0x91, 0x21, 0x0d, 0xa9, 0xf2, 0x58, 0x38, 0xe0, 0xf3, 0xbd, 0xdb, 0x39, 0xc2, 0x47, 0x54, + 0x75, 0xc2, 0x01, 0xc7, 0xb5, 0x94, 0x6c, 0x8e, 0x58, 0x5f, 0x40, 0x63, 0xc1, 0x9d, 0xa2, 0x22, + 0x24, 0xbe, 0x27, 0x78, 0xa4, 0xa8, 0xb4, 0x57, 0xf5, 0x01, 0x77, 0x5e, 0x63, 0xd0, 0xe4, 0xe1, + 0x38, 0x0d, 0x6f, 0x67, 0x2c, 0x66, 0x70, 0x69, 0x3d, 0x82, 0x6a, 0x40, 0x95, 0x60, 0x3d, 0x4f, + 0x91, 0xa1, 0xb4, 0x4b, 0x9a, 0xed, 0x76, 0xc2, 0xf6, 0xda, 0x52, 0xb7, 0x1e, 0xea, 0x05, 0x4f, + 0xc9, 
0x50, 0xde, 0x0b, 0x95, 0x18, 0x63, 0x08, 0x52, 0xc0, 0xba, 0x09, 0x57, 0x63, 0x66, 0xd2, + 0xf5, 0xa9, 0x5d, 0xde, 0x41, 0xbb, 0x57, 0xef, 0x17, 0x70, 0x8a, 0xe8, 0x2b, 0xea, 0x98, 0x30, + 0x9f, 0x74, 0x99, 0xcf, 0xd4, 0xd8, 0xfb, 0x96, 0x87, 0xd4, 0xbe, 0xa2, 0xdb, 0x6d, 0x73, 0x36, + 0x69, 0xe6, 0x27, 0x71, 0x2d, 0x0b, 0x7d, 0xce, 0x43, 0xda, 0xf8, 0x18, 0xae, 0x2f, 0x19, 0xb0, + 0x6a, 0xb0, 0x92, 0xbc, 0xf0, 0x0a, 0x8e, 0x87, 0xd6, 0x06, 0x94, 0x8e, 0x89, 0x1f, 0x51, 0x73, + 0xfa, 0xb1, 0x09, 0xf6, 0x8b, 0x1f, 0xa1, 0xf6, 0x0d, 0xa8, 0xf3, 0x91, 0x62, 0x3c, 0x29, 0x61, + 0xec, 0xcb, 0x7d, 0x19, 0x9f, 0x0d, 0x41, 0xe4, 0xb3, 0xff, 0x7f, 0x5b, 0x7d, 0x08, 0xd7, 0xf4, + 0x35, 0xeb, 0x05, 0x54, 0x4a, 0x32, 0xa4, 0xba, 0xa1, 0x2a, 0xed, 0xfa, 0x6c, 0xd2, 0x5c, 0x9c, + 0xc0, 0x6b, 0x3a, 0x7c, 0x68, 0x22, 0xf7, 0x07, 0x04, 0x1b, 0x9f, 0x11, 0xe6, 0x5f, 0xea, 0x0e, + 0x73, 0x66, 0x8a, 0x6f, 0x66, 0xe6, 0x29, 0x6c, 0x61, 0xaa, 0x98, 0xa0, 0x97, 0xe9, 0xc6, 0xfd, + 0x05, 0xc5, 0xb4, 0x01, 0x3f, 0xa6, 0x6f, 0xf3, 0x15, 0x17, 0x80, 0x95, 0x66, 0x5f, 0xf0, 0x0f, + 0x60, 0x0f, 0xaa, 0xe7, 0x7e, 0x92, 0x6f, 0x7f, 0x3d, 0xe7, 0x01, 0x43, 0x2a, 0x2c, 0xdd, 0x5f, + 0x11, 0xd4, 0xb3, 0x7a, 0x97, 0xfc, 0x8d, 0xcd, 0x55, 0x7e, 0xe5, 0x4d, 0x2a, 0xff, 0x6e, 0x52, + 0xf9, 0xd5, 0xa5, 0xca, 0xdf, 0x2f, 0xcc, 0x6b, 0xdf, 0xae, 0xc1, 0x7a, 0x7a, 0x8e, 0x0d, 0xf2, + 0xc1, 0xc9, 0xa9, 0x53, 0x78, 0x71, 0xea, 0x14, 0x5e, 0x9d, 0x3a, 0xe8, 0xbb, 0xa9, 0x83, 0x7e, + 0x9a, 0x3a, 0xe8, 0xb7, 0xa9, 0x83, 0x4e, 0xa6, 0x0e, 0x7a, 0x39, 0x75, 0xd0, 0x1f, 0x53, 0xa7, + 0xf0, 0x6a, 0xea, 0xa0, 0x1f, 0xcf, 0x9c, 0xc2, 0xc9, 0x99, 0x53, 0x78, 0x71, 0xe6, 0x14, 0xba, + 0x65, 0xfd, 0x03, 0x77, 0xf7, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xee, 0x85, 0x3a, 0xee, 0x33, + 0x0a, 0x00, 0x00, +} + +func (this *ActualLRPLifecycleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPLifecycleResponse) + if !ok { + that2, ok := 
that.(ActualLRPLifecycleResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *ActualLRPGroupsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupsResponse) + if !ok { + that2, ok := that.(ActualLRPGroupsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.ActualLrpGroups) != len(that1.ActualLrpGroups) { + return false + } + for i := range this.ActualLrpGroups { + if !this.ActualLrpGroups[i].Equal(that1.ActualLrpGroups[i]) { + return false + } + } + return true +} +func (this *ActualLRPGroupResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupResponse) + if !ok { + that2, ok := that.(ActualLRPGroupResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.ActualLrpGroup.Equal(that1.ActualLrpGroup) { + return false + } + return true +} +func (this *ActualLRPGroupsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupsRequest) + if !ok { + that2, ok := that.(ActualLRPGroupsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *ActualLRPGroupsByProcessGuidRequest) Equal(that interface{}) bool { + if that == 
nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupsByProcessGuidRequest) + if !ok { + that2, ok := that.(ActualLRPGroupsByProcessGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + return true +} +func (this *ActualLRPGroupByProcessGuidAndIndexRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupByProcessGuidAndIndexRequest) + if !ok { + that2, ok := that.(ActualLRPGroupByProcessGuidAndIndexRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *ClaimActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClaimActualLRPRequest) + if !ok { + that2, ok := that.(ClaimActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + return true +} +func (this *StartActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartActualLRPRequest) + if !ok { + that2, ok := that.(StartActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + if 
!this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + if !this.ActualLrpNetInfo.Equal(that1.ActualLrpNetInfo) { + return false + } + if len(this.ActualLrpInternalRoutes) != len(that1.ActualLrpInternalRoutes) { + return false + } + for i := range this.ActualLrpInternalRoutes { + if !this.ActualLrpInternalRoutes[i].Equal(that1.ActualLrpInternalRoutes[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if this.MetricTags[i] != that1.MetricTags[i] { + return false + } + } + if that1.OptionalRoutable == nil { + if this.OptionalRoutable != nil { + return false + } + } else if this.OptionalRoutable == nil { + return false + } else if !this.OptionalRoutable.Equal(that1.OptionalRoutable) { + return false + } + if this.AvailabilityZone != that1.AvailabilityZone { + return false + } + return true +} +func (this *StartActualLRPRequest_Routable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartActualLRPRequest_Routable) + if !ok { + that2, ok := that.(StartActualLRPRequest_Routable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Routable != that1.Routable { + return false + } + return true +} +func (this *CrashActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CrashActualLRPRequest) + if !ok { + that2, ok := that.(CrashActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + if this.ErrorMessage != that1.ErrorMessage { + return false + } + return true +} +func (this 
*FailActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FailActualLRPRequest) + if !ok { + that2, ok := that.(FailActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + if this.ErrorMessage != that1.ErrorMessage { + return false + } + return true +} +func (this *RetireActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RetireActualLRPRequest) + if !ok { + that2, ok := that.(RetireActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + return true +} +func (this *RemoveActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RemoveActualLRPRequest) + if !ok { + that2, ok := that.(RemoveActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + return true +} +func (this *ActualLRPsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPsResponse) + if !ok { + that2, ok := that.(ActualLRPsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.ActualLrps) != len(that1.ActualLrps) { + 
return false + } + for i := range this.ActualLrps { + if !this.ActualLrps[i].Equal(that1.ActualLrps[i]) { + return false + } + } + return true +} +func (this *ActualLRPsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPsRequest) + if !ok { + that2, ok := that.(ActualLRPsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if that1.OptionalIndex == nil { + if this.OptionalIndex != nil { + return false + } + } else if this.OptionalIndex == nil { + return false + } else if !this.OptionalIndex.Equal(that1.OptionalIndex) { + return false + } + return true +} +func (this *ActualLRPsRequest_Index) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPsRequest_Index) + if !ok { + that2, ok := that.(ActualLRPsRequest_Index) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *ActualLRPLifecycleResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPLifecycleResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.ActualLrpGroups != nil { + s = append(s, 
"ActualLrpGroups: "+fmt.Sprintf("%#v", this.ActualLrpGroups)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.ActualLrpGroup != nil { + s = append(s, "ActualLrpGroup: "+fmt.Sprintf("%#v", this.ActualLrpGroup)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupsRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupsByProcessGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPGroupsByProcessGuidRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupByProcessGuidAndIndexRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupByProcessGuidAndIndexRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ClaimActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.ClaimActualLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + if 
this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.StartActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + if this.ActualLrpNetInfo != nil { + s = append(s, "ActualLrpNetInfo: "+fmt.Sprintf("%#v", this.ActualLrpNetInfo)+",\n") + } + if this.ActualLrpInternalRoutes != nil { + s = append(s, "ActualLrpInternalRoutes: "+fmt.Sprintf("%#v", this.ActualLrpInternalRoutes)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartActualLRPRequest_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.StartActualLRPRequest_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func (this *CrashActualLRPRequest) GoString() string { + if this == nil { + 
return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.CrashActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FailActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.FailActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RetireActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.RetireActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.RemoveActualLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + 
if this.ActualLrps != nil { + s = append(s, "ActualLrps: "+fmt.Sprintf("%#v", this.ActualLrps)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.ActualLRPsRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + if this.OptionalIndex != nil { + s = append(s, "OptionalIndex: "+fmt.Sprintf("%#v", this.OptionalIndex)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPsRequest_Index) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ActualLRPsRequest_Index{` + + `Index:` + fmt.Sprintf("%#v", this.Index) + `}`}, ", ") + return s +} +func valueToGoStringActualLrpRequests(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ActualLRPLifecycleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPLifecycleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPLifecycleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupsResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ActualLrpGroups) > 0 { + for iNdEx := len(m.ActualLrpGroups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpGroups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpGroup != nil { + { + size, err := m.ActualLrpGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
} + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupsByProcessGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupsByProcessGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupsByProcessGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClaimActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClaimActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClaimActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x3a + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintActualLrpRequests(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ActualLrpInternalRoutes) > 0 { + for iNdEx := len(m.ActualLrpInternalRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpInternalRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.ActualLrpNetInfo != nil { + { + size, err := m.ActualLrpNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartActualLRPRequest_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartActualLRPRequest_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *CrashActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrashActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrashActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FailActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FailActualLRPRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FailActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetireActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetireActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetireActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := 
m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ActualLrps) > 0 { + for iNdEx := len(m.ActualLrps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPsRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OptionalIndex != nil { + { + size := m.OptionalIndex.Size() + i -= size + if _, err := m.OptionalIndex.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPsRequest_Index) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPsRequest_Index) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func encodeVarintActualLrpRequests(dAtA []byte, offset int, v uint64) int { + offset -= sovActualLrpRequests(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ActualLRPLifecycleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if len(m.ActualLrpGroups) > 0 { + for _, e := range m.ActualLrpGroups { + l = e.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + } + return n +} + +func (m 
*ActualLRPGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpGroup != nil { + l = m.ActualLrpGroup.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupsByProcessGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrpRequests(uint64(m.Index)) + } + return n +} + +func (m *ClaimActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrpRequests(uint64(m.Index)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *StartActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpNetInfo != nil { + l = m.ActualLrpNetInfo.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if 
len(m.ActualLrpInternalRoutes) > 0 { + for _, e := range m.ActualLrpInternalRoutes { + l = e.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovActualLrpRequests(uint64(len(k))) + 1 + len(v) + sovActualLrpRequests(uint64(len(v))) + n += mapEntrySize + 1 + sovActualLrpRequests(uint64(mapEntrySize)) + } + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *StartActualLRPRequest_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *CrashActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *FailActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *RetireActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *RemoveActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrpRequests(uint64(m.Index)) + } + if 
m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if len(m.ActualLrps) > 0 { + for _, e := range m.ActualLrps { + l = e.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + } + return n +} + +func (m *ActualLRPsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.OptionalIndex != nil { + n += m.OptionalIndex.Size() + } + return n +} + +func (m *ActualLRPsRequest_Index) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovActualLrpRequests(uint64(m.Index)) + return n +} + +func sovActualLrpRequests(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozActualLrpRequests(x uint64) (n int) { + return sovActualLrpRequests(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ActualLRPLifecycleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPLifecycleResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpGroups := "[]*ActualLRPGroup{" + for _, f := range this.ActualLrpGroups { + repeatedStringForActualLrpGroups += strings.Replace(fmt.Sprintf("%v", f), "ActualLRPGroup", "ActualLRPGroup", 1) + "," + } + repeatedStringForActualLrpGroups += "}" + s := strings.Join([]string{`&ActualLRPGroupsResponse{`, + 
`Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ActualLrpGroups:` + repeatedStringForActualLrpGroups + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ActualLrpGroup:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpGroup), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupsRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupsByProcessGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupsByProcessGuidRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupByProcessGuidAndIndexRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupByProcessGuidAndIndexRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *ClaimActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClaimActualLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StartActualLRPRequest) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForActualLrpInternalRoutes := "[]*ActualLRPInternalRoute{" + for _, f := range this.ActualLrpInternalRoutes { + repeatedStringForActualLrpInternalRoutes += strings.Replace(fmt.Sprintf("%v", f), "ActualLRPInternalRoute", "ActualLRPInternalRoute", 1) + "," + } + repeatedStringForActualLrpInternalRoutes += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&StartActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ActualLrpNetInfo:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpNetInfo), "ActualLRPNetInfo", "ActualLRPNetInfo", 1) + `,`, + `ActualLrpInternalRoutes:` + repeatedStringForActualLrpInternalRoutes + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *StartActualLRPRequest_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartActualLRPRequest_Routable{`, + `Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func (this *CrashActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrashActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 
1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} +func (this *FailActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FailActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} +func (this *RetireActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetireActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveActualLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrps := "[]*ActualLRP{" + for _, f := range this.ActualLrps { + repeatedStringForActualLrps += strings.Replace(fmt.Sprintf("%v", f), "ActualLRP", "ActualLRP", 1) + "," + } + repeatedStringForActualLrps += "}" + s := strings.Join([]string{`&ActualLRPsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ActualLrps:` + repeatedStringForActualLrps + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPsRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ActualLRPsRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `OptionalIndex:` + fmt.Sprintf("%v", this.OptionalIndex) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPsRequest_Index) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPsRequest_Index{`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func valueToStringActualLrpRequests(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ActualLRPLifecycleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPLifecycleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPLifecycleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := 
m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpGroups = append(m.ActualLrpGroups, &ActualLRPGroup{}) + if err := m.ActualLrpGroups[len(m.ActualLrpGroups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpGroup == nil { + m.ActualLrpGroup = &ActualLRPGroup{} + } + if err := m.ActualLrpGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupsByProcessGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupsByProcessGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupsByProcessGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupByProcessGuidAndIndexRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupByProcessGuidAndIndexRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClaimActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClaimActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClaimActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: StartActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpNetInfo == nil { + m.ActualLrpNetInfo = &ActualLRPNetInfo{} + } + if err := m.ActualLrpNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInternalRoutes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpInternalRoutes = append(m.ActualLrpInternalRoutes, &ActualLRPInternalRoute{}) + if err := m.ActualLrpInternalRoutes[len(m.ActualLrpInternalRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthActualLrpRequests + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthActualLrpRequests + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = 
postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = &StartActualLRPRequest_Routable{b} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrashActualLRPRequest) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrashActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrashActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FailActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FailActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FailActualLRPRequest: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetireActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetireActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetireActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrps = append(m.ActualLrps, &ActualLRP{}) + if err := m.ActualLrps[len(m.ActualLrps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ActualLRPsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalIndex = &ActualLRPsRequest_Index{v} + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipActualLrpRequests(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthActualLrpRequests + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupActualLrpRequests + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthActualLrpRequests + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthActualLrpRequests = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowActualLrpRequests = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupActualLrpRequests = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto new file mode 100644 index 00000000..178d6127 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actual_lrp.proto"; +import "error.proto"; + +message ActualLRPLifecycleResponse { + Error error = 1; +} + +message ActualLRPGroupsResponse { + option deprecated = true; + Error error = 1; + repeated ActualLRPGroup actual_lrp_groups = 2; +} + +message ActualLRPGroupResponse { + option deprecated = true; + Error error = 1; + ActualLRPGroup actual_lrp_group = 2; +} + +message ActualLRPGroupsRequest { + option deprecated = true; + string domain = 1 [(gogoproto.jsontag) = "domain"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message ActualLRPGroupsByProcessGuidRequest { + option deprecated = true; + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; +} + +message ActualLRPGroupByProcessGuidAndIndexRequest { + option deprecated = true; + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + 
int32 index = 2 [(gogoproto.jsontag) = "index"]; +} + +message ClaimActualLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = "index"]; + ActualLRPInstanceKey actual_lrp_instance_key = 3; +} + +message StartActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + ActualLRPNetInfo actual_lrp_net_info = 3; + repeated ActualLRPInternalRoute actual_lrp_internal_routes = 4; + map metric_tags = 5; + oneof optional_routable { + bool Routable = 6; + } + string availability_zone = 7 [(gogoproto.jsontag)= "availability_zone"]; +} + +message CrashActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + string error_message = 3 [(gogoproto.jsontag) = "error_message"]; +} + +message FailActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + string error_message = 2 [(gogoproto.jsontag) = "error_message"]; +} + +message RetireActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; +} + +message RemoveActualLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = "index"]; + ActualLRPInstanceKey actual_lrp_instance_key = 3; +} + +message ActualLRPsResponse { + Error error = 1; + repeated ActualLRP actual_lrps = 2; +} + +message ActualLRPsRequest { + string domain = 1 [(gogoproto.jsontag) = "domain"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; + string process_guid = 3 [(gogoproto.jsontag) = "process_guid"]; + oneof optional_index { + int32 index = 4 [(gogoproto.jsontag) = "index"]; + } +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go b/vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go new file mode 100644 index 00000000..6fbb6cc2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go @@ -0,0 +1,38 @@ +package models + +import "net/url" + +type BBSPresence struct { + ID string `json:"id"` 
+ URL string `json:"url"` +} + +func NewBBSPresence(id, url string) BBSPresence { + return BBSPresence{ + ID: id, + URL: url, + } +} + +func (p BBSPresence) Validate() error { + var validationError ValidationError + + if p.ID == "" { + validationError = validationError.Append(ErrInvalidField{Field: "id"}) + } + + if p.URL == "" { + validationError = validationError.Append(ErrInvalidField{Field: "url"}) + } + + url, err := url.Parse(p.URL) + if err != nil || !url.IsAbs() { + validationError = validationError.Append(ErrInvalidField{Field: "url"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go new file mode 100644 index 00000000..dca87b11 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go @@ -0,0 +1,59 @@ +package models + +import ( + "strings" + + "code.cloudfoundry.org/bbs/format" +) + +func (c *CachedDependency) Validate() error { + var validationError ValidationError + + if c.GetFrom() == "" { + validationError = validationError.Append(ErrInvalidField{"from"}) + } + + if c.GetTo() == "" { + validationError = validationError.Append(ErrInvalidField{"to"}) + } + + if c.GetChecksumValue() != "" && c.GetChecksumAlgorithm() == "" { + validationError = validationError.Append(ErrInvalidField{"checksum algorithm"}) + } + + if c.GetChecksumValue() == "" && c.GetChecksumAlgorithm() != "" { + validationError = validationError.Append(ErrInvalidField{"checksum value"}) + } + + if c.GetChecksumValue() != "" && c.GetChecksumAlgorithm() != "" { + if !contains([]string{"md5", "sha1", "sha256"}, strings.ToLower(c.GetChecksumAlgorithm())) { + validationError = validationError.Append(ErrInvalidField{"invalid algorithm"}) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func validateCachedDependencies(cachedDependencies []*CachedDependency) 
ValidationError { + var validationError ValidationError + + if len(cachedDependencies) > 0 { + for _, cacheDep := range cachedDependencies { + err := cacheDep.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"cached_dependency"}) + validationError = validationError.Append(err) + } + } + } + + return validationError +} + +func (c *CachedDependency) Version() format.Version { + return format.V0 +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go new file mode 100644 index 00000000..2bd79438 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cached_dependency.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CachedDependency struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + From string `protobuf:"bytes,2,opt,name=from,proto3" json:"from"` + To string `protobuf:"bytes,3,opt,name=to,proto3" json:"to"` + CacheKey string `protobuf:"bytes,4,opt,name=cache_key,json=cacheKey,proto3" json:"cache_key"` + LogSource string `protobuf:"bytes,5,opt,name=log_source,json=logSource,proto3" json:"log_source"` + ChecksumAlgorithm string `protobuf:"bytes,6,opt,name=checksum_algorithm,json=checksumAlgorithm,proto3" json:"checksum_algorithm,omitempty"` + ChecksumValue string `protobuf:"bytes,7,opt,name=checksum_value,json=checksumValue,proto3" json:"checksum_value,omitempty"` +} + +func (m *CachedDependency) Reset() { *m = CachedDependency{} } +func (*CachedDependency) ProtoMessage() {} +func (*CachedDependency) Descriptor() ([]byte, []int) { + return fileDescriptor_936e0e6e1c3697fa, []int{0} +} +func (m *CachedDependency) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CachedDependency) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CachedDependency.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CachedDependency) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachedDependency.Merge(m, src) +} +func (m *CachedDependency) XXX_Size() int { + return m.Size() +} +func (m *CachedDependency) XXX_DiscardUnknown() { + xxx_messageInfo_CachedDependency.DiscardUnknown(m) +} + +var xxx_messageInfo_CachedDependency proto.InternalMessageInfo + +func (m *CachedDependency) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CachedDependency) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *CachedDependency) GetTo() string { + 
if m != nil { + return m.To + } + return "" +} + +func (m *CachedDependency) GetCacheKey() string { + if m != nil { + return m.CacheKey + } + return "" +} + +func (m *CachedDependency) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *CachedDependency) GetChecksumAlgorithm() string { + if m != nil { + return m.ChecksumAlgorithm + } + return "" +} + +func (m *CachedDependency) GetChecksumValue() string { + if m != nil { + return m.ChecksumValue + } + return "" +} + +func init() { + proto.RegisterType((*CachedDependency)(nil), "models.CachedDependency") +} + +func init() { proto.RegisterFile("cached_dependency.proto", fileDescriptor_936e0e6e1c3697fa) } + +var fileDescriptor_936e0e6e1c3697fa = []byte{ + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0x33, 0xb9, 0xbd, 0xb9, 0xed, 0x40, 0xcb, 0xbd, 0xb3, 0xb8, 0x0e, 0x22, 0x27, 0x22, + 0x08, 0x22, 0xb4, 0x5d, 0xe8, 0x0b, 0x58, 0xdd, 0xb9, 0x8b, 0xe0, 0x36, 0xa4, 0x93, 0x69, 0x52, + 0x9a, 0xf4, 0x94, 0x34, 0x11, 0xba, 0xf3, 0x11, 0xdc, 0xf9, 0x0a, 0x3e, 0x8a, 0xcb, 0x2e, 0xbb, + 0x0a, 0x76, 0xba, 0x91, 0xac, 0xfa, 0x08, 0x92, 0x23, 0x6d, 0xdd, 0x1c, 0xfe, 0xff, 0xff, 0xfe, + 0x73, 0x18, 0x86, 0x1f, 0xa9, 0x40, 0xc5, 0x3a, 0xf4, 0x43, 0x3d, 0xd3, 0xd3, 0x50, 0x4f, 0xd5, + 0xa2, 0x37, 0xcb, 0x30, 0x47, 0xe1, 0xa4, 0x18, 0xea, 0x64, 0x7e, 0xdc, 0x8d, 0xc6, 0x79, 0x5c, + 0x0c, 0x7b, 0x0a, 0xd3, 0x7e, 0x84, 0x11, 0xf6, 0x09, 0x0f, 0x8b, 0x11, 0x39, 0x32, 0xa4, 0xbe, + 0xd7, 0xce, 0x5e, 0x6d, 0xfe, 0xf7, 0x96, 0x4e, 0xde, 0xed, 0x2f, 0x8a, 0x13, 0xde, 0x98, 0x06, + 0xa9, 0x96, 0xec, 0x94, 0x5d, 0xb4, 0x06, 0xcd, 0xaa, 0x74, 0xc9, 0x7b, 0x34, 0x6b, 0x3a, 0xca, + 0x30, 0x95, 0xf6, 0x81, 0xd6, 0xde, 0xa3, 0x29, 0xfe, 0x73, 0x3b, 0x47, 0xf9, 0x8b, 0x98, 0x53, + 0x95, 0xae, 0x9d, 0xa3, 0x67, 0xe7, 0x28, 0x2e, 0x79, 0x8b, 0x9e, 0xee, 0x4f, 0xf4, 0x42, 0x36, + 0x08, 0xb7, 0xab, 0xd2, 
0x3d, 0x84, 0x5e, 0x93, 0xe4, 0xbd, 0x5e, 0x88, 0x2e, 0xe7, 0x09, 0x46, + 0xfe, 0x1c, 0x8b, 0x4c, 0x69, 0xf9, 0x9b, 0xca, 0x9d, 0xaa, 0x74, 0x7f, 0xa4, 0x5e, 0x2b, 0xc1, + 0xe8, 0x81, 0xa4, 0xe8, 0x72, 0xa1, 0x62, 0xad, 0x26, 0xf3, 0x22, 0xf5, 0x83, 0x24, 0xc2, 0x6c, + 0x9c, 0xc7, 0xa9, 0x74, 0xea, 0x35, 0xef, 0xdf, 0x8e, 0xdc, 0xec, 0x80, 0x38, 0xe7, 0x9d, 0x7d, + 0xfd, 0x29, 0x48, 0x0a, 0x2d, 0xff, 0x50, 0xb5, 0xbd, 0x4b, 0x1f, 0xeb, 0x70, 0x70, 0xbd, 0x5c, + 0x03, 0x5b, 0xad, 0xc1, 0xda, 0xae, 0x81, 0x3d, 0x1b, 0x60, 0x6f, 0x06, 0xd8, 0xbb, 0x01, 0xb6, + 0x34, 0xc0, 0x3e, 0x0c, 0xb0, 0x4f, 0x03, 0xd6, 0xd6, 0x00, 0x7b, 0xd9, 0x80, 0xb5, 0xdc, 0x80, + 0xb5, 0xda, 0x80, 0x35, 0x74, 0xe8, 0x5b, 0xaf, 0xbe, 0x02, 0x00, 0x00, 0xff, 0xff, 0x10, 0xce, + 0x51, 0x13, 0xa8, 0x01, 0x00, 0x00, +} + +func (this *CachedDependency) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CachedDependency) + if !ok { + that2, ok := that.(CachedDependency) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.From != that1.From { + return false + } + if this.To != that1.To { + return false + } + if this.CacheKey != that1.CacheKey { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.ChecksumAlgorithm != that1.ChecksumAlgorithm { + return false + } + if this.ChecksumValue != that1.ChecksumValue { + return false + } + return true +} +func (this *CachedDependency) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.CachedDependency{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "CacheKey: "+fmt.Sprintf("%#v", this.CacheKey)+",\n") + s = 
append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "ChecksumAlgorithm: "+fmt.Sprintf("%#v", this.ChecksumAlgorithm)+",\n") + s = append(s, "ChecksumValue: "+fmt.Sprintf("%#v", this.ChecksumValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCachedDependency(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CachedDependency) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CachedDependency) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachedDependency) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChecksumValue) > 0 { + i -= len(m.ChecksumValue) + copy(dAtA[i:], m.ChecksumValue) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.ChecksumValue))) + i-- + dAtA[i] = 0x3a + } + if len(m.ChecksumAlgorithm) > 0 { + i -= len(m.ChecksumAlgorithm) + copy(dAtA[i:], m.ChecksumAlgorithm) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.ChecksumAlgorithm))) + i-- + dAtA[i] = 0x32 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x2a + } + if len(m.CacheKey) > 0 { + i -= len(m.CacheKey) + copy(dAtA[i:], m.CacheKey) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.CacheKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.To) > 0 { + i -= len(m.To) + copy(dAtA[i:], m.To) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.To))) + i-- + dAtA[i] = 0x1a + } + if len(m.From) > 0 { + i -= len(m.From) + copy(dAtA[i:], 
m.From) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCachedDependency(dAtA []byte, offset int, v uint64) int { + offset -= sovCachedDependency(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CachedDependency) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.From) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.To) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.CacheKey) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.ChecksumAlgorithm) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.ChecksumValue) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + return n +} + +func sovCachedDependency(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCachedDependency(x uint64) (n int) { + return sovCachedDependency(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CachedDependency) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CachedDependency{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `CacheKey:` + fmt.Sprintf("%v", this.CacheKey) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `ChecksumAlgorithm:` + fmt.Sprintf("%v", this.ChecksumAlgorithm) + `,`, + `ChecksumValue:` + fmt.Sprintf("%v", this.ChecksumValue) + `,`, + 
`}`, + }, "") + return s +} +func valueToStringCachedDependency(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CachedDependency) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CachedDependency: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CachedDependency: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CacheKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumAlgorithm", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumAlgorithm = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipCachedDependency(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCachedDependency + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCachedDependency(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCachedDependency + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCachedDependency + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCachedDependency + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCachedDependency = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCachedDependency = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCachedDependency = fmt.Errorf("proto: unexpected end 
of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto new file mode 100644 index 00000000..daea97e2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message CachedDependency { + string name = 1 [(gogoproto.jsontag) = "name"]; + string from = 2 [(gogoproto.jsontag) = "from"]; + string to = 3 [(gogoproto.jsontag) = "to"]; + string cache_key = 4 [(gogoproto.jsontag) = "cache_key"]; + string log_source = 5 [(gogoproto.jsontag) = "log_source"]; + string checksum_algorithm = 6; + string checksum_value = 7; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cell_presence.go b/vendor/code.cloudfoundry.org/bbs/models/cell_presence.go new file mode 100644 index 00000000..9c940670 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cell_presence.go @@ -0,0 +1,153 @@ +package models + +import "strings" + +type CellSet map[string]*CellPresence + +func NewCellSet() CellSet { + return make(CellSet) +} + +func NewCellSetFromList(cells []*CellPresence) CellSet { + cellSet := NewCellSet() + for _, v := range cells { + cellSet.Add(v) + } + return cellSet +} + +func (set CellSet) Add(cell *CellPresence) { + set[cell.CellId] = cell +} + +func (set CellSet) Each(predicate func(cell *CellPresence)) { + for _, cell := range set { + predicate(cell) + } +} + +func (set CellSet) HasCellID(cellID string) bool { + _, ok := set[cellID] + return ok +} + +func (set CellSet) CellIDs() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + return keys +} + +func NewCellCapacity(memoryMB, diskMB, containers int32) CellCapacity { + return CellCapacity{ + MemoryMb: memoryMB, + DiskMb: diskMB, + Containers: containers, + } +} + +func (cap 
CellCapacity) Validate() error { + var validationError ValidationError + + if cap.MemoryMb <= 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if cap.DiskMb < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if cap.Containers <= 0 { + validationError = validationError.Append(ErrInvalidField{"containers"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func NewCellPresence( + cellID, repAddress, repUrl, zone string, + capacity CellCapacity, + rootFSProviders, preloadedRootFSes, placementTags, optionalPlacementTags []string, +) CellPresence { + var providers []*Provider + var pProviders []string + pProviders = append(pProviders, preloadedRootFSes...) + providers = append(providers, &Provider{PreloadedRootFSScheme, pProviders}) + providers = append(providers, &Provider{PreloadedOCIRootFSScheme, pProviders}) + + for _, prov := range rootFSProviders { + providers = append(providers, &Provider{prov, []string{}}) + } + + return CellPresence{ + CellId: cellID, + RepAddress: repAddress, + RepUrl: repUrl, + Zone: zone, + Capacity: &capacity, + RootfsProviders: providers, + PlacementTags: placementTags, + OptionalPlacementTags: optionalPlacementTags, + } +} + +func (c CellPresence) Validate() error { + var validationError ValidationError + + if c.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if c.RepAddress == "" { + validationError = validationError.Append(ErrInvalidField{"rep_address"}) + } + + if c.RepUrl != "" && !strings.HasPrefix(c.RepUrl, "http://") && !strings.HasPrefix(c.RepUrl, "https://") { + validationError = validationError.Append(ErrInvalidField{"rep_url"}) + } + + if err := c.Capacity.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +const ( + EventTypeCellDisappeared = "cell_disappeared" +) + 
+type CellEvent interface { + EventType() string + CellIDs() []string +} + +type CellDisappearedEvent struct { + IDs []string +} + +func NewCellDisappearedEvent(ids []string) CellDisappearedEvent { + return CellDisappearedEvent{ids} +} + +func (CellDisappearedEvent) EventType() string { + return EventTypeCellDisappeared +} + +func (e CellDisappearedEvent) CellIDs() []string { + return e.IDs +} + +func (c *CellPresence) Copy() *CellPresence { + newCellPresense := *c + return &newCellPresense +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cells.pb.go b/vendor/code.cloudfoundry.org/bbs/models/cells.pb.go new file mode 100644 index 00000000..36d9d7b7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cells.pb.go @@ -0,0 +1,1703 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cells.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CellCapacity struct { + MemoryMb int32 `protobuf:"varint,1,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + DiskMb int32 `protobuf:"varint,2,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + Containers int32 `protobuf:"varint,3,opt,name=containers,proto3" json:"containers"` +} + +func (m *CellCapacity) Reset() { *m = CellCapacity{} } +func (*CellCapacity) ProtoMessage() {} +func (*CellCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{0} +} +func (m *CellCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CellCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CellCapacity.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CellCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellCapacity.Merge(m, src) +} +func (m *CellCapacity) XXX_Size() int { + return m.Size() +} +func (m *CellCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_CellCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_CellCapacity proto.InternalMessageInfo + +func (m *CellCapacity) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *CellCapacity) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *CellCapacity) GetContainers() int32 { + if m != nil { + return m.Containers + } + return 0 +} + +type CellPresence struct { + CellId string `protobuf:"bytes,1,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + RepAddress string `protobuf:"bytes,2,opt,name=rep_address,json=repAddress,proto3" json:"rep_address"` + Zone string `protobuf:"bytes,3,opt,name=zone,proto3" json:"zone"` + Capacity *CellCapacity `protobuf:"bytes,4,opt,name=capacity,proto3" json:"capacity,omitempty"` + 
RootfsProviders []*Provider `protobuf:"bytes,5,rep,name=rootfs_providers,json=rootfsProviders,proto3" json:"rootfs_provider_list,omitempty"` + PlacementTags []string `protobuf:"bytes,6,rep,name=placement_tags,json=placementTags,proto3" json:"placement_tags,omitempty"` + OptionalPlacementTags []string `protobuf:"bytes,7,rep,name=optional_placement_tags,json=optionalPlacementTags,proto3" json:"optional_placement_tags,omitempty"` + RepUrl string `protobuf:"bytes,8,opt,name=rep_url,json=repUrl,proto3" json:"rep_url"` +} + +func (m *CellPresence) Reset() { *m = CellPresence{} } +func (*CellPresence) ProtoMessage() {} +func (*CellPresence) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{1} +} +func (m *CellPresence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CellPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CellPresence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CellPresence) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellPresence.Merge(m, src) +} +func (m *CellPresence) XXX_Size() int { + return m.Size() +} +func (m *CellPresence) XXX_DiscardUnknown() { + xxx_messageInfo_CellPresence.DiscardUnknown(m) +} + +var xxx_messageInfo_CellPresence proto.InternalMessageInfo + +func (m *CellPresence) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *CellPresence) GetRepAddress() string { + if m != nil { + return m.RepAddress + } + return "" +} + +func (m *CellPresence) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CellPresence) GetCapacity() *CellCapacity { + if m != nil { + return m.Capacity + } + return nil +} + +func (m *CellPresence) GetRootfsProviders() []*Provider { + if m != nil { + return m.RootfsProviders + } + return nil +} + +func (m 
*CellPresence) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +func (m *CellPresence) GetOptionalPlacementTags() []string { + if m != nil { + return m.OptionalPlacementTags + } + return nil +} + +func (m *CellPresence) GetRepUrl() string { + if m != nil { + return m.RepUrl + } + return "" +} + +type Provider struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + Properties []string `protobuf:"bytes,2,rep,name=properties,proto3" json:"properties,omitempty"` +} + +func (m *Provider) Reset() { *m = Provider{} } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{2} +} +func (m *Provider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Provider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) +} +func (m *Provider) XXX_Size() int { + return m.Size() +} +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) +} + +var xxx_messageInfo_Provider proto.InternalMessageInfo + +func (m *Provider) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Provider) GetProperties() []string { + if m != nil { + return m.Properties + } + return nil +} + +type CellsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Cells []*CellPresence `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` +} + +func (m *CellsResponse) Reset() { *m = CellsResponse{} } +func (*CellsResponse) ProtoMessage() {} +func (*CellsResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_842e821272d22ff7, []int{3} +} +func (m *CellsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CellsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CellsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CellsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellsResponse.Merge(m, src) +} +func (m *CellsResponse) XXX_Size() int { + return m.Size() +} +func (m *CellsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CellsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CellsResponse proto.InternalMessageInfo + +func (m *CellsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *CellsResponse) GetCells() []*CellPresence { + if m != nil { + return m.Cells + } + return nil +} + +func init() { + proto.RegisterType((*CellCapacity)(nil), "models.CellCapacity") + proto.RegisterType((*CellPresence)(nil), "models.CellPresence") + proto.RegisterType((*Provider)(nil), "models.Provider") + proto.RegisterType((*CellsResponse)(nil), "models.CellsResponse") +} + +func init() { proto.RegisterFile("cells.proto", fileDescriptor_842e821272d22ff7) } + +var fileDescriptor_842e821272d22ff7 = []byte{ + // 548 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, + 0x14, 0x4f, 0xe8, 0x9a, 0xb5, 0x0e, 0xdd, 0x26, 0x0b, 0x44, 0x35, 0x21, 0xa7, 0x2a, 0x43, 0xaa, + 0x26, 0xe8, 0xa6, 0x81, 0xb8, 0xd3, 0x09, 0x09, 0x0e, 0x93, 0x26, 0x0b, 0xce, 0x21, 0x7f, 0xde, + 0x4a, 0x44, 0x12, 0x5b, 0xb6, 0x8b, 0x54, 0x4e, 0x7c, 0x84, 0x7d, 0x00, 0x3e, 0x00, 0x1f, 0x85, + 0x63, 0x8f, 0x3b, 0x45, 0x34, 0xbd, 0xa0, 0x9c, 0xf6, 0x11, 0x90, 0x9d, 0x66, 0x2b, 0xbd, 0x58, + 0xbf, 0xf7, 0x7b, 0xbf, 0x67, 0x3f, 0xff, 0x9e, 0x8d, 0xdc, 0x08, 
0xd2, 0x54, 0x8e, 0xb9, 0x60, + 0x8a, 0x61, 0x27, 0x63, 0x31, 0xa4, 0xf2, 0xf0, 0xe5, 0x34, 0x51, 0x5f, 0x66, 0xe1, 0x38, 0x62, + 0xd9, 0xc9, 0x94, 0x4d, 0xd9, 0x89, 0x49, 0x87, 0xb3, 0x2b, 0x13, 0x99, 0xc0, 0xa0, 0xba, 0xec, + 0xd0, 0x05, 0x21, 0x98, 0xa8, 0x83, 0xe1, 0xb5, 0x8d, 0x1e, 0x9e, 0x43, 0x9a, 0x9e, 0x07, 0x3c, + 0x88, 0x12, 0x35, 0xc7, 0xc7, 0xa8, 0x9b, 0x41, 0xc6, 0xc4, 0xdc, 0xcf, 0xc2, 0xbe, 0x3d, 0xb0, + 0x47, 0xed, 0x49, 0xaf, 0x2a, 0xbc, 0x7b, 0x92, 0x76, 0x6a, 0x78, 0x11, 0xe2, 0x23, 0xb4, 0x1b, + 0x27, 0xf2, 0xab, 0x56, 0x3e, 0x30, 0x4a, 0xb7, 0x2a, 0xbc, 0x86, 0xa2, 0x8e, 0x06, 0x17, 0x21, + 0x1e, 0x23, 0x14, 0xb1, 0x5c, 0x05, 0x49, 0x0e, 0x42, 0xf6, 0x5b, 0x46, 0xb8, 0x57, 0x15, 0xde, + 0x06, 0x4b, 0x37, 0xf0, 0xf0, 0x67, 0xab, 0x6e, 0xe9, 0x52, 0x80, 0x84, 0x3c, 0x02, 0x7d, 0x8c, + 0xbe, 0xb6, 0x9f, 0xc4, 0xa6, 0xa1, 0x6e, 0x7d, 0xcc, 0x9a, 0xa2, 0x8e, 0x06, 0x1f, 0x62, 0x7c, + 0x8a, 0x5c, 0x01, 0xdc, 0x0f, 0xe2, 0x58, 0x80, 0x94, 0xa6, 0xa1, 0xee, 0x64, 0xbf, 0x2a, 0xbc, + 0x4d, 0x9a, 0x22, 0x01, 0xfc, 0x6d, 0x8d, 0xf1, 0x53, 0xb4, 0xf3, 0x9d, 0xe5, 0x60, 0x5a, 0xea, + 0x4e, 0x3a, 0x55, 0xe1, 0x99, 0x98, 0x9a, 0x15, 0x9f, 0xa2, 0x4e, 0xb4, 0x36, 0xa5, 0xbf, 0x33, + 0xb0, 0x47, 0xee, 0xd9, 0xa3, 0x71, 0x6d, 0xf8, 0x78, 0xd3, 0x30, 0x7a, 0xa7, 0xc2, 0x3e, 0x3a, + 0x10, 0x8c, 0xa9, 0x2b, 0xe9, 0x73, 0xc1, 0xbe, 0x25, 0xb1, 0xbe, 0x6e, 0x7b, 0xd0, 0x1a, 0xb9, + 0x67, 0x07, 0x4d, 0xe5, 0xe5, 0x3a, 0x31, 0x19, 0x56, 0x85, 0x47, 0xb6, 0xd4, 0x7e, 0x9a, 0x48, + 0xf5, 0x82, 0x65, 0x89, 0x82, 0x8c, 0xab, 0x39, 0xdd, 0xaf, 0xf3, 0x4d, 0x8d, 0xc4, 0xcf, 0xd1, + 0x1e, 0x4f, 0x83, 0x08, 0x32, 0xc8, 0x95, 0xaf, 0x82, 0xa9, 0xec, 0x3b, 0x83, 0xd6, 0xa8, 0x4b, + 0x7b, 0x77, 0xec, 0xc7, 0x60, 0x2a, 0xf1, 0x1b, 0xf4, 0x84, 0x71, 0x95, 0xb0, 0x3c, 0x48, 0xfd, + 0x2d, 0xfd, 0xae, 0xd1, 0x3f, 0x6e, 0xd2, 0x97, 0xff, 0xd5, 0x1d, 0xa1, 0x5d, 0x6d, 0xd5, 0x4c, + 0xa4, 0xfd, 0xce, 0xbd, 0xcf, 0x6b, 0x8a, 0x3a, 0x02, 0xf8, 0x27, 0x91, 0x0e, 0xdf, 0xa3, 0x4e, + 0xd3, 
0x91, 0x76, 0x30, 0x0f, 0x32, 0x58, 0x8f, 0xc5, 0x38, 0xa8, 0x63, 0x6a, 0x56, 0x4c, 0x10, + 0xe2, 0x82, 0x71, 0x10, 0x2a, 0x01, 0x3d, 0x10, 0x7d, 0xf4, 0x06, 0x33, 0xfc, 0x8c, 0x7a, 0xda, + 0x49, 0x49, 0x41, 0x72, 0x96, 0x4b, 0xc0, 0xcf, 0x50, 0xdb, 0xbc, 0x4d, 0xb3, 0x9f, 0x7b, 0xd6, + 0x6b, 0x5c, 0x7b, 0xa7, 0x49, 0x5a, 0xe7, 0xf0, 0x31, 0x6a, 0x9b, 0x4f, 0x60, 0x36, 0xdc, 0x1a, + 0x4a, 0xf3, 0x64, 0x68, 0x2d, 0x99, 0xbc, 0x5e, 0x2c, 0x89, 0x75, 0xb3, 0x24, 0xd6, 0xed, 0x92, + 0xd8, 0x3f, 0x4a, 0x62, 0xff, 0x2a, 0x89, 0xfd, 0xbb, 0x24, 0xf6, 0xa2, 0x24, 0xf6, 0x9f, 0x92, + 0xd8, 0x7f, 0x4b, 0x62, 0xdd, 0x96, 0xc4, 0xbe, 0x5e, 0x11, 0x6b, 0xb1, 0x22, 0xd6, 0xcd, 0x8a, + 0x58, 0xa1, 0x63, 0xbe, 0xc6, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x12, 0x8c, 0x77, 0x8d, + 0x6d, 0x03, 0x00, 0x00, +} + +func (this *CellCapacity) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CellCapacity) + if !ok { + that2, ok := that.(CellCapacity) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.Containers != that1.Containers { + return false + } + return true +} +func (this *CellPresence) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CellPresence) + if !ok { + that2, ok := that.(CellPresence) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.RepAddress != that1.RepAddress { + return false + } + if this.Zone != that1.Zone { + return false + } + if !this.Capacity.Equal(that1.Capacity) { + return false + } + if len(this.RootfsProviders) != len(that1.RootfsProviders) { + return false + } + for i := range 
this.RootfsProviders { + if !this.RootfsProviders[i].Equal(that1.RootfsProviders[i]) { + return false + } + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + if len(this.OptionalPlacementTags) != len(that1.OptionalPlacementTags) { + return false + } + for i := range this.OptionalPlacementTags { + if this.OptionalPlacementTags[i] != that1.OptionalPlacementTags[i] { + return false + } + } + if this.RepUrl != that1.RepUrl { + return false + } + return true +} +func (this *Provider) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Provider) + if !ok { + that2, ok := that.(Provider) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Properties) != len(that1.Properties) { + return false + } + for i := range this.Properties { + if this.Properties[i] != that1.Properties[i] { + return false + } + } + return true +} +func (this *CellsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CellsResponse) + if !ok { + that2, ok := that.(CellsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.Cells) != len(that1.Cells) { + return false + } + for i := range this.Cells { + if !this.Cells[i].Equal(that1.Cells[i]) { + return false + } + } + return true +} +func (this *CellCapacity) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.CellCapacity{") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "DiskMb: 
"+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "Containers: "+fmt.Sprintf("%#v", this.Containers)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CellPresence) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&models.CellPresence{") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "RepAddress: "+fmt.Sprintf("%#v", this.RepAddress)+",\n") + s = append(s, "Zone: "+fmt.Sprintf("%#v", this.Zone)+",\n") + if this.Capacity != nil { + s = append(s, "Capacity: "+fmt.Sprintf("%#v", this.Capacity)+",\n") + } + if this.RootfsProviders != nil { + s = append(s, "RootfsProviders: "+fmt.Sprintf("%#v", this.RootfsProviders)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "OptionalPlacementTags: "+fmt.Sprintf("%#v", this.OptionalPlacementTags)+",\n") + s = append(s, "RepUrl: "+fmt.Sprintf("%#v", this.RepUrl)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Provider) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.Provider{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Properties: "+fmt.Sprintf("%#v", this.Properties)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CellsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.CellsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.Cells != nil { + s = append(s, "Cells: "+fmt.Sprintf("%#v", this.Cells)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCells(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v 
)", typ, typ, pv) +} +func (m *CellCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Containers != 0 { + i = encodeVarintCells(dAtA, i, uint64(m.Containers)) + i-- + dAtA[i] = 0x18 + } + if m.DiskMb != 0 { + i = encodeVarintCells(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x10 + } + if m.MemoryMb != 0 { + i = encodeVarintCells(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CellPresence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellPresence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RepUrl) > 0 { + i -= len(m.RepUrl) + copy(dAtA[i:], m.RepUrl) + i = encodeVarintCells(dAtA, i, uint64(len(m.RepUrl))) + i-- + dAtA[i] = 0x42 + } + if len(m.OptionalPlacementTags) > 0 { + for iNdEx := len(m.OptionalPlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OptionalPlacementTags[iNdEx]) + copy(dAtA[i:], m.OptionalPlacementTags[iNdEx]) + i = encodeVarintCells(dAtA, i, uint64(len(m.OptionalPlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintCells(dAtA, i, 
uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RootfsProviders) > 0 { + for iNdEx := len(m.RootfsProviders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RootfsProviders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Capacity != nil { + { + size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Zone) > 0 { + i -= len(m.Zone) + copy(dAtA[i:], m.Zone) + i = encodeVarintCells(dAtA, i, uint64(len(m.Zone))) + i-- + dAtA[i] = 0x1a + } + if len(m.RepAddress) > 0 { + i -= len(m.RepAddress) + copy(dAtA[i:], m.RepAddress) + i = encodeVarintCells(dAtA, i, uint64(len(m.RepAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintCells(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Provider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Provider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Properties) > 0 { + for iNdEx := len(m.Properties) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Properties[iNdEx]) + copy(dAtA[i:], m.Properties[iNdEx]) + i = encodeVarintCells(dAtA, i, uint64(len(m.Properties[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCells(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
(m *CellsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Cells[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCells(dAtA []byte, offset int, v uint64) int { + offset -= sovCells(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CellCapacity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryMb != 0 { + n += 1 + sovCells(uint64(m.MemoryMb)) + } + if m.DiskMb != 0 { + n += 1 + sovCells(uint64(m.DiskMb)) + } + if m.Containers != 0 { + n += 1 + sovCells(uint64(m.Containers)) + } + return n +} + +func (m *CellPresence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + l = len(m.RepAddress) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + l = len(m.Zone) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + if m.Capacity != nil { + l = m.Capacity.Size() + n += 1 + l + sovCells(uint64(l)) + } + if len(m.RootfsProviders) > 0 { + for _, e := range m.RootfsProviders { + 
l = e.Size() + n += 1 + l + sovCells(uint64(l)) + } + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 1 + l + sovCells(uint64(l)) + } + } + if len(m.OptionalPlacementTags) > 0 { + for _, s := range m.OptionalPlacementTags { + l = len(s) + n += 1 + l + sovCells(uint64(l)) + } + } + l = len(m.RepUrl) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + return n +} + +func (m *Provider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + if len(m.Properties) > 0 { + for _, s := range m.Properties { + l = len(s) + n += 1 + l + sovCells(uint64(l)) + } + } + return n +} + +func (m *CellsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovCells(uint64(l)) + } + if len(m.Cells) > 0 { + for _, e := range m.Cells { + l = e.Size() + n += 1 + l + sovCells(uint64(l)) + } + } + return n +} + +func sovCells(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCells(x uint64) (n int) { + return sovCells(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CellCapacity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CellCapacity{`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `Containers:` + fmt.Sprintf("%v", this.Containers) + `,`, + `}`, + }, "") + return s +} +func (this *CellPresence) String() string { + if this == nil { + return "nil" + } + repeatedStringForRootfsProviders := "[]*Provider{" + for _, f := range this.RootfsProviders { + repeatedStringForRootfsProviders += strings.Replace(f.String(), "Provider", "Provider", 1) + "," + } + repeatedStringForRootfsProviders += "}" + s := strings.Join([]string{`&CellPresence{`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `RepAddress:` + fmt.Sprintf("%v", this.RepAddress) + `,`, 
+ `Zone:` + fmt.Sprintf("%v", this.Zone) + `,`, + `Capacity:` + strings.Replace(this.Capacity.String(), "CellCapacity", "CellCapacity", 1) + `,`, + `RootfsProviders:` + repeatedStringForRootfsProviders + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `OptionalPlacementTags:` + fmt.Sprintf("%v", this.OptionalPlacementTags) + `,`, + `RepUrl:` + fmt.Sprintf("%v", this.RepUrl) + `,`, + `}`, + }, "") + return s +} +func (this *Provider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Provider{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Properties:` + fmt.Sprintf("%v", this.Properties) + `,`, + `}`, + }, "") + return s +} +func (this *CellsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForCells := "[]*CellPresence{" + for _, f := range this.Cells { + repeatedStringForCells += strings.Replace(f.String(), "CellPresence", "CellPresence", 1) + "," + } + repeatedStringForCells += "}" + s := strings.Join([]string{`&CellsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Cells:` + repeatedStringForCells + `,`, + `}`, + }, "") + return s +} +func valueToStringCells(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CellCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellCapacity: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + m.Containers = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Containers |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CellPresence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: CellPresence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellPresence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = &CellCapacity{} + } + if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootfsProviders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootfsProviders = append(m.RootfsProviders, &Provider{}) + if err := m.RootfsProviders[len(m.RootfsProviders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalPlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OptionalPlacementTags = append(m.OptionalPlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Properties = append(m.Properties, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CellsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, &CellPresence{}) + if err := m.Cells[len(m.Cells)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCells(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCells + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCells + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCells + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
length < 0 { + return 0, ErrInvalidLengthCells + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCells + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCells + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCells = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCells = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCells = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/cells.proto b/vendor/code.cloudfoundry.org/bbs/models/cells.proto new file mode 100644 index 00000000..e73a028a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cells.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "error.proto"; + +message CellCapacity { + int32 memory_mb = 1 [(gogoproto.jsontag) = "memory_mb"]; + int32 disk_mb = 2 [(gogoproto.jsontag) = "disk_mb"]; + int32 containers = 3 [(gogoproto.jsontag) = "containers"]; +} + +message CellPresence { + string cell_id = 1 [(gogoproto.jsontag) = "cell_id"]; + string rep_address = 2 [(gogoproto.jsontag) = "rep_address"]; + string zone = 3 [(gogoproto.jsontag) = "zone"]; + CellCapacity capacity = 4; + repeated Provider rootfs_providers = 5 [(gogoproto.jsontag) = "rootfs_provider_list,omitempty"]; + repeated string placement_tags = 6; + repeated string optional_placement_tags = 7; + string rep_url = 8 [(gogoproto.jsontag) = "rep_url"]; +} + +message Provider { + string name = 1 [(gogoproto.jsontag) = "name"]; + repeated string properties = 2; +} + +message CellsResponse { + Error error = 1; + repeated CellPresence cells = 2; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go 
b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go new file mode 100644 index 00000000..7815f28f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go @@ -0,0 +1,385 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: certificate_properties.proto + +package models + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CertificateProperties struct { + OrganizationalUnit []string `protobuf:"bytes,1,rep,name=organizational_unit,json=organizationalUnit,proto3" json:"organizational_unit,omitempty"` +} + +func (m *CertificateProperties) Reset() { *m = CertificateProperties{} } +func (*CertificateProperties) ProtoMessage() {} +func (*CertificateProperties) Descriptor() ([]byte, []int) { + return fileDescriptor_9291b57c1fe01997, []int{0} +} +func (m *CertificateProperties) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateProperties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CertificateProperties.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CertificateProperties) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateProperties.Merge(m, src) +} +func (m *CertificateProperties) XXX_Size() int { + return m.Size() +} 
+func (m *CertificateProperties) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateProperties.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateProperties proto.InternalMessageInfo + +func (m *CertificateProperties) GetOrganizationalUnit() []string { + if m != nil { + return m.OrganizationalUnit + } + return nil +} + +func init() { + proto.RegisterType((*CertificateProperties)(nil), "models.CertificateProperties") +} + +func init() { proto.RegisterFile("certificate_properties.proto", fileDescriptor_9291b57c1fe01997) } + +var fileDescriptor_9291b57c1fe01997 = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x4e, 0x2d, 0x2a, + 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0x49, 0x8d, 0x2f, 0x28, 0xca, 0x2f, 0x00, 0x71, 0x53, 0x8b, + 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xd8, 0x72, 0xf3, 0x53, 0x52, 0x73, 0x8a, 0x95, 0x3c, + 0xb8, 0x44, 0x9d, 0x11, 0xea, 0x02, 0xe0, 0xca, 0x84, 0xf4, 0xb9, 0x84, 0xf3, 0x8b, 0xd2, 0x13, + 0xf3, 0x32, 0xab, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x12, 0x73, 0xe2, 0x4b, 0xf3, 0x32, 0x4b, 0x24, + 0x18, 0x15, 0x98, 0x35, 0x38, 0x83, 0x84, 0x50, 0xa5, 0x42, 0xf3, 0x32, 0x4b, 0x9c, 0x4c, 0x2e, + 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, + 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, + 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, + 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0x9d, 0x63, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x07, 0xe2, 0x02, 0xdf, 0xae, 0x00, 0x00, 0x00, +} + +func (this *CertificateProperties) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CertificateProperties) + if !ok { + that2, ok := that.(CertificateProperties) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if 
this == nil { + return false + } + if len(this.OrganizationalUnit) != len(that1.OrganizationalUnit) { + return false + } + for i := range this.OrganizationalUnit { + if this.OrganizationalUnit[i] != that1.OrganizationalUnit[i] { + return false + } + } + return true +} +func (this *CertificateProperties) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.CertificateProperties{") + s = append(s, "OrganizationalUnit: "+fmt.Sprintf("%#v", this.OrganizationalUnit)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCertificateProperties(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CertificateProperties) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateProperties) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateProperties) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OrganizationalUnit) > 0 { + for iNdEx := len(m.OrganizationalUnit) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OrganizationalUnit[iNdEx]) + copy(dAtA[i:], m.OrganizationalUnit[iNdEx]) + i = encodeVarintCertificateProperties(dAtA, i, uint64(len(m.OrganizationalUnit[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintCertificateProperties(dAtA []byte, offset int, v uint64) int { + offset -= sovCertificateProperties(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CertificateProperties) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.OrganizationalUnit) > 0 { + for _, s := range m.OrganizationalUnit { + l = len(s) + n += 1 + l + sovCertificateProperties(uint64(l)) + } + } + return n +} + +func sovCertificateProperties(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCertificateProperties(x uint64) (n int) { + return sovCertificateProperties(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CertificateProperties) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateProperties{`, + `OrganizationalUnit:` + fmt.Sprintf("%v", this.OrganizationalUnit) + `,`, + `}`, + }, "") + return s +} +func valueToStringCertificateProperties(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CertificateProperties) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateProperties: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateProperties: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrganizationalUnit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCertificateProperties + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCertificateProperties + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrganizationalUnit = append(m.OrganizationalUnit, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCertificateProperties(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCertificateProperties + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCertificateProperties(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCertificateProperties + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCertificateProperties + } + depth-- + case 5: 
+ iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCertificateProperties + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCertificateProperties = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCertificateProperties = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCertificateProperties = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto new file mode 100644 index 00000000..9eced102 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package models; + +message CertificateProperties { + repeated string organizational_unit = 1; +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/check_definition.go b/vendor/code.cloudfoundry.org/bbs/models/check_definition.go new file mode 100644 index 00000000..9772c5db --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/check_definition.go @@ -0,0 +1,55 @@ +package models + +type PortChecker interface { + GetPort() uint32 +} + +func (check CheckDefinition) Validate() error { + var validationError ValidationError + + checks := check.GetChecks() + + for _, check := range checks { + checkError := check.Validate() + if checkError != nil { + validationError = validationError.Append(checkError) + } + } + + readiness_checks := check.GetReadinessChecks() + + for _, check := range readiness_checks { + checkError := check.Validate() + if checkError != nil { + validationError = validationError.Append(checkError) + } + } + + return validationError.ToError() + +} + +func (check Check) GetPortChecker() PortChecker { + httpCheck := check.GetHttpCheck() + tcpCheck := check.GetTcpCheck() + if httpCheck != nil && tcpCheck 
!= nil { + return nil + } + if httpCheck != nil { + return httpCheck + } else { + return tcpCheck + } +} + +func (check Check) Validate() error { + var validationError ValidationError + c := check.GetPortChecker() + + if c == nil { + validationError = validationError.Append(ErrInvalidField{"check"}) + } else if !(c.GetPort() > 0 && c.GetPort() <= 65535) { + validationError = validationError.Append(ErrInvalidField{"port"}) + } + return validationError.ToError() +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go b/vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go new file mode 100644 index 00000000..d4c6ff75 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go @@ -0,0 +1,1453 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: check_definition.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CheckDefinition struct { + Checks []*Check `protobuf:"bytes,1,rep,name=checks,proto3" json:"checks,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source"` + ReadinessChecks []*Check `protobuf:"bytes,3,rep,name=readiness_checks,json=readinessChecks,proto3" json:"readiness_checks,omitempty"` +} + +func (m *CheckDefinition) Reset() { *m = CheckDefinition{} } +func (*CheckDefinition) ProtoMessage() {} +func (*CheckDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{0} +} +func (m *CheckDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckDefinition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckDefinition.Merge(m, src) +} +func (m *CheckDefinition) XXX_Size() int { + return m.Size() +} +func (m *CheckDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CheckDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckDefinition proto.InternalMessageInfo + +func (m *CheckDefinition) GetChecks() []*Check { + if m != nil { + return m.Checks + } + return nil +} + +func (m *CheckDefinition) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *CheckDefinition) GetReadinessChecks() []*Check { + if m != nil { + return m.ReadinessChecks + } + return nil +} + +type Check struct { + // oneof is hard to use right now, instead we can do this check in validation + // oneof check { + TcpCheck *TCPCheck `protobuf:"bytes,1,opt,name=tcp_check,json=tcpCheck,proto3" json:"tcp_check,omitempty"` + HttpCheck *HTTPCheck 
`protobuf:"bytes,2,opt,name=http_check,json=httpCheck,proto3" json:"http_check,omitempty"` +} + +func (m *Check) Reset() { *m = Check{} } +func (*Check) ProtoMessage() {} +func (*Check) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{1} +} +func (m *Check) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Check) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Check.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Check) XXX_Merge(src proto.Message) { + xxx_messageInfo_Check.Merge(m, src) +} +func (m *Check) XXX_Size() int { + return m.Size() +} +func (m *Check) XXX_DiscardUnknown() { + xxx_messageInfo_Check.DiscardUnknown(m) +} + +var xxx_messageInfo_Check proto.InternalMessageInfo + +func (m *Check) GetTcpCheck() *TCPCheck { + if m != nil { + return m.TcpCheck + } + return nil +} + +func (m *Check) GetHttpCheck() *HTTPCheck { + if m != nil { + return m.HttpCheck + } + return nil +} + +type TCPCheck struct { + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port"` + ConnectTimeoutMs uint64 `protobuf:"varint,2,opt,name=connect_timeout_ms,json=connectTimeoutMs,proto3" json:"connect_timeout_ms,omitempty"` + IntervalMs uint64 `protobuf:"varint,3,opt,name=interval_ms,json=intervalMs,proto3" json:"interval_ms,omitempty"` +} + +func (m *TCPCheck) Reset() { *m = TCPCheck{} } +func (*TCPCheck) ProtoMessage() {} +func (*TCPCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{2} +} +func (m *TCPCheck) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TCPCheck.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil + } +} +func (m *TCPCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPCheck.Merge(m, src) +} +func (m *TCPCheck) XXX_Size() int { + return m.Size() +} +func (m *TCPCheck) XXX_DiscardUnknown() { + xxx_messageInfo_TCPCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_TCPCheck proto.InternalMessageInfo + +func (m *TCPCheck) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *TCPCheck) GetConnectTimeoutMs() uint64 { + if m != nil { + return m.ConnectTimeoutMs + } + return 0 +} + +func (m *TCPCheck) GetIntervalMs() uint64 { + if m != nil { + return m.IntervalMs + } + return 0 +} + +type HTTPCheck struct { + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port"` + RequestTimeoutMs uint64 `protobuf:"varint,2,opt,name=request_timeout_ms,json=requestTimeoutMs,proto3" json:"request_timeout_ms,omitempty"` + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path"` + IntervalMs uint64 `protobuf:"varint,4,opt,name=interval_ms,json=intervalMs,proto3" json:"interval_ms,omitempty"` +} + +func (m *HTTPCheck) Reset() { *m = HTTPCheck{} } +func (*HTTPCheck) ProtoMessage() {} +func (*HTTPCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{3} +} +func (m *HTTPCheck) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPCheck.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPCheck.Merge(m, src) +} +func (m *HTTPCheck) XXX_Size() int { + return m.Size() +} +func (m *HTTPCheck) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPCheck proto.InternalMessageInfo + +func (m *HTTPCheck) GetPort() 
uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *HTTPCheck) GetRequestTimeoutMs() uint64 { + if m != nil { + return m.RequestTimeoutMs + } + return 0 +} + +func (m *HTTPCheck) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *HTTPCheck) GetIntervalMs() uint64 { + if m != nil { + return m.IntervalMs + } + return 0 +} + +func init() { + proto.RegisterType((*CheckDefinition)(nil), "models.CheckDefinition") + proto.RegisterType((*Check)(nil), "models.Check") + proto.RegisterType((*TCPCheck)(nil), "models.TCPCheck") + proto.RegisterType((*HTTPCheck)(nil), "models.HTTPCheck") +} + +func init() { proto.RegisterFile("check_definition.proto", fileDescriptor_048a62b88ce7913d) } + +var fileDescriptor_048a62b88ce7913d = []byte{ + // 414 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x31, 0x8f, 0xd3, 0x40, + 0x10, 0x85, 0xbd, 0x97, 0x10, 0xc5, 0x13, 0x1d, 0x17, 0xb6, 0x40, 0x11, 0x42, 0x9b, 0xc8, 0x12, + 0x52, 0x0a, 0xe2, 0x43, 0x07, 0x05, 0x75, 0x8e, 0x82, 0xe6, 0x24, 0x64, 0xdc, 0x5b, 0xce, 0x66, + 0xcf, 0xb6, 0xb0, 0xbd, 0xc6, 0xbb, 0x86, 0x96, 0x9f, 0x40, 0x45, 0x4f, 0xc7, 0x4f, 0xa1, 0x4c, + 0x99, 0x2a, 0x22, 0x4e, 0x83, 0x52, 0xe5, 0x27, 0x20, 0x8f, 0xed, 0x80, 0xa2, 0x48, 0x34, 0xd6, + 0xcc, 0xbe, 0xef, 0xbd, 0x37, 0x85, 0xe1, 0x31, 0x0f, 0x05, 0xff, 0xe0, 0x2d, 0xc5, 0x7d, 0x94, + 0x46, 0x3a, 0x92, 0xa9, 0x9d, 0xe5, 0x52, 0x4b, 0xda, 0x4b, 0xe4, 0x52, 0xc4, 0xea, 0xc9, 0x2c, + 0x88, 0x74, 0x58, 0x2c, 0x6c, 0x2e, 0x93, 0xeb, 0x40, 0x06, 0xf2, 0x1a, 0xe5, 0x45, 0x71, 0x8f, + 0x1b, 0x2e, 0x38, 0xd5, 0x36, 0xeb, 0x3b, 0x81, 0xab, 0xdb, 0x2a, 0xf1, 0xcd, 0x31, 0x90, 0x3e, + 0x83, 0x1e, 0x96, 0xa8, 0x11, 0x99, 0x74, 0xa6, 0x83, 0x9b, 0x4b, 0xbb, 0xce, 0xb6, 0x11, 0x74, + 0x1a, 0x91, 0xce, 0x00, 0x62, 0x19, 0x78, 0x4a, 0x16, 0x39, 0x17, 0xa3, 0x8b, 0x09, 0x99, 0x9a, + 0xf3, 0x87, 0xfb, 0xcd, 0xf8, 0x9f, 0x57, 0xc7, 0x8c, 0x65, 0xf0, 0x1e, 0x47, 0xfa, 0x1a, 
0x86, + 0xb9, 0xf0, 0x97, 0x51, 0x2a, 0x94, 0xf2, 0x9a, 0xfc, 0xce, 0xb9, 0xfc, 0xab, 0x23, 0x86, 0xbb, + 0xb2, 0x42, 0x78, 0x80, 0x13, 0x9d, 0x81, 0xa9, 0x79, 0x56, 0x9b, 0x47, 0x64, 0x42, 0xa6, 0x83, + 0x9b, 0x61, 0xeb, 0x75, 0x6f, 0xdf, 0xd5, 0xf6, 0xbe, 0xe6, 0x59, 0x8d, 0xbf, 0x00, 0x08, 0xb5, + 0x6e, 0xf9, 0x0b, 0xe4, 0x1f, 0xb5, 0xfc, 0x5b, 0xd7, 0x6d, 0x0c, 0x66, 0x05, 0xe1, 0x68, 0x7d, + 0x86, 0x7e, 0x9b, 0x43, 0x9f, 0x42, 0x37, 0x93, 0xb9, 0xc6, 0x9e, 0xcb, 0x79, 0x7f, 0xbf, 0x19, + 0xe3, 0xee, 0xe0, 0x97, 0x3e, 0x07, 0xca, 0x65, 0x9a, 0x0a, 0xae, 0x3d, 0x1d, 0x25, 0x42, 0x16, + 0xda, 0x4b, 0x14, 0x76, 0x74, 0x9d, 0x61, 0xa3, 0xb8, 0xb5, 0x70, 0xa7, 0xe8, 0x18, 0x06, 0x51, + 0xaa, 0x45, 0xfe, 0xc9, 0x8f, 0x2b, 0xac, 0x83, 0x18, 0xb4, 0x4f, 0x77, 0xca, 0xfa, 0x46, 0xc0, + 0x3c, 0x5e, 0xf4, 0xff, 0xea, 0x5c, 0x7c, 0x2c, 0x84, 0x3a, 0x57, 0xdd, 0x28, 0x7f, 0xab, 0xab, + 0x2c, 0x5f, 0x87, 0xd8, 0x69, 0x36, 0x59, 0xbe, 0x0e, 0x1d, 0xfc, 0x9e, 0x1e, 0xd6, 0x3d, 0x3d, + 0x6c, 0xfe, 0x6a, 0xb5, 0x65, 0xc6, 0x7a, 0xcb, 0x8c, 0xc3, 0x96, 0x91, 0x2f, 0x25, 0x23, 0x3f, + 0x4a, 0x46, 0x7e, 0x96, 0x8c, 0xac, 0x4a, 0x46, 0x7e, 0x95, 0x8c, 0xfc, 0x2e, 0x99, 0x71, 0x28, + 0x19, 0xf9, 0xba, 0x63, 0xc6, 0x6a, 0xc7, 0x8c, 0xf5, 0x8e, 0x19, 0x8b, 0x1e, 0xfe, 0x5c, 0x2f, + 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0x3b, 0x86, 0xad, 0x02, 0x00, 0x00, +} + +func (this *CheckDefinition) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CheckDefinition) + if !ok { + that2, ok := that.(CheckDefinition) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Checks) != len(that1.Checks) { + return false + } + for i := range this.Checks { + if !this.Checks[i].Equal(that1.Checks[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + if len(this.ReadinessChecks) != len(that1.ReadinessChecks) { 
+ return false + } + for i := range this.ReadinessChecks { + if !this.ReadinessChecks[i].Equal(that1.ReadinessChecks[i]) { + return false + } + } + return true +} +func (this *Check) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Check) + if !ok { + that2, ok := that.(Check) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TcpCheck.Equal(that1.TcpCheck) { + return false + } + if !this.HttpCheck.Equal(that1.HttpCheck) { + return false + } + return true +} +func (this *TCPCheck) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TCPCheck) + if !ok { + that2, ok := that.(TCPCheck) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Port != that1.Port { + return false + } + if this.ConnectTimeoutMs != that1.ConnectTimeoutMs { + return false + } + if this.IntervalMs != that1.IntervalMs { + return false + } + return true +} +func (this *HTTPCheck) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HTTPCheck) + if !ok { + that2, ok := that.(HTTPCheck) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Port != that1.Port { + return false + } + if this.RequestTimeoutMs != that1.RequestTimeoutMs { + return false + } + if this.Path != that1.Path { + return false + } + if this.IntervalMs != that1.IntervalMs { + return false + } + return true +} +func (this *CheckDefinition) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.CheckDefinition{") + if this.Checks != nil { + s = append(s, "Checks: "+fmt.Sprintf("%#v", this.Checks)+",\n") + } + s = append(s, 
"LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + if this.ReadinessChecks != nil { + s = append(s, "ReadinessChecks: "+fmt.Sprintf("%#v", this.ReadinessChecks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Check) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.Check{") + if this.TcpCheck != nil { + s = append(s, "TcpCheck: "+fmt.Sprintf("%#v", this.TcpCheck)+",\n") + } + if this.HttpCheck != nil { + s = append(s, "HttpCheck: "+fmt.Sprintf("%#v", this.HttpCheck)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TCPCheck) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.TCPCheck{") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "ConnectTimeoutMs: "+fmt.Sprintf("%#v", this.ConnectTimeoutMs)+",\n") + s = append(s, "IntervalMs: "+fmt.Sprintf("%#v", this.IntervalMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HTTPCheck) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.HTTPCheck{") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "RequestTimeoutMs: "+fmt.Sprintf("%#v", this.RequestTimeoutMs)+",\n") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "IntervalMs: "+fmt.Sprintf("%#v", this.IntervalMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCheckDefinition(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CheckDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *CheckDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ReadinessChecks) > 0 { + for iNdEx := len(m.ReadinessChecks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessChecks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintCheckDefinition(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Checks) > 0 { + for iNdEx := len(m.Checks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Checks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Check) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Check) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Check) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HttpCheck != nil { + { + size, err := m.HttpCheck.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.TcpCheck != nil { + { + size, err := m.TcpCheck.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + 
return len(dAtA) - i, nil +} + +func (m *TCPCheck) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPCheck) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPCheck) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IntervalMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.IntervalMs)) + i-- + dAtA[i] = 0x18 + } + if m.ConnectTimeoutMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.ConnectTimeoutMs)) + i-- + dAtA[i] = 0x10 + } + if m.Port != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPCheck) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPCheck) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPCheck) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IntervalMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.IntervalMs)) + i-- + dAtA[i] = 0x20 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintCheckDefinition(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.RequestTimeoutMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.RequestTimeoutMs)) + i-- + dAtA[i] = 0x10 + } + if m.Port != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintCheckDefinition(dAtA []byte, offset int, v uint64) int { + offset -= sovCheckDefinition(v) + base := 
offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CheckDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Checks) > 0 { + for _, e := range m.Checks { + l = e.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovCheckDefinition(uint64(l)) + } + if len(m.ReadinessChecks) > 0 { + for _, e := range m.ReadinessChecks { + l = e.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + } + return n +} + +func (m *Check) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpCheck != nil { + l = m.TcpCheck.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + if m.HttpCheck != nil { + l = m.HttpCheck.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + return n +} + +func (m *TCPCheck) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != 0 { + n += 1 + sovCheckDefinition(uint64(m.Port)) + } + if m.ConnectTimeoutMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.ConnectTimeoutMs)) + } + if m.IntervalMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.IntervalMs)) + } + return n +} + +func (m *HTTPCheck) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != 0 { + n += 1 + sovCheckDefinition(uint64(m.Port)) + } + if m.RequestTimeoutMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.RequestTimeoutMs)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovCheckDefinition(uint64(l)) + } + if m.IntervalMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.IntervalMs)) + } + return n +} + +func sovCheckDefinition(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCheckDefinition(x uint64) (n int) { + return sovCheckDefinition(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CheckDefinition) String() string { + if this == nil { + return "nil" + } + repeatedStringForChecks := 
"[]*Check{" + for _, f := range this.Checks { + repeatedStringForChecks += strings.Replace(f.String(), "Check", "Check", 1) + "," + } + repeatedStringForChecks += "}" + repeatedStringForReadinessChecks := "[]*Check{" + for _, f := range this.ReadinessChecks { + repeatedStringForReadinessChecks += strings.Replace(f.String(), "Check", "Check", 1) + "," + } + repeatedStringForReadinessChecks += "}" + s := strings.Join([]string{`&CheckDefinition{`, + `Checks:` + repeatedStringForChecks + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `ReadinessChecks:` + repeatedStringForReadinessChecks + `,`, + `}`, + }, "") + return s +} +func (this *Check) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Check{`, + `TcpCheck:` + strings.Replace(this.TcpCheck.String(), "TCPCheck", "TCPCheck", 1) + `,`, + `HttpCheck:` + strings.Replace(this.HttpCheck.String(), "HTTPCheck", "HTTPCheck", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TCPCheck) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPCheck{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `ConnectTimeoutMs:` + fmt.Sprintf("%v", this.ConnectTimeoutMs) + `,`, + `IntervalMs:` + fmt.Sprintf("%v", this.IntervalMs) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPCheck) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPCheck{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `RequestTimeoutMs:` + fmt.Sprintf("%v", this.RequestTimeoutMs) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `IntervalMs:` + fmt.Sprintf("%v", this.IntervalMs) + `,`, + `}`, + }, "") + return s +} +func valueToStringCheckDefinition(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CheckDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checks = append(m.Checks, &Check{}) + if err := m.Checks[len(m.Checks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource 
= string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessChecks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadinessChecks = append(m.ReadinessChecks, &Check{}) + if err := m.ReadinessChecks[len(m.ReadinessChecks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Check) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Check: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Check: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TcpCheck", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TcpCheck == nil { + m.TcpCheck = &TCPCheck{} + } + if err := m.TcpCheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpCheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpCheck == nil { + m.HttpCheck = &HTTPCheck{} + } + if err := m.HttpCheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPCheck) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPCheck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPCheck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectTimeoutMs", wireType) + } + m.ConnectTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConnectTimeoutMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalMs", wireType) + } + m.IntervalMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntervalMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPCheck) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPCheck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPCheck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTimeoutMs", wireType) + } + m.RequestTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequestTimeoutMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalMs", wireType) + } + m.IntervalMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntervalMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCheckDefinition(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCheckDefinition + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, 
ErrUnexpectedEndOfGroupCheckDefinition + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCheckDefinition + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCheckDefinition = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCheckDefinition = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCheckDefinition = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/check_definition.proto b/vendor/code.cloudfoundry.org/bbs/models/check_definition.proto new file mode 100644 index 00000000..e38338d5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/check_definition.proto @@ -0,0 +1,33 @@ + +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message CheckDefinition { + repeated Check checks = 1; + string log_source = 2 [(gogoproto.jsontag) = "log_source"]; + repeated Check readiness_checks = 3; +} + +message Check { + // oneof is hard to use right now, instead we can do this check in validation + // oneof check { + TCPCheck tcp_check = 1; + HTTPCheck http_check = 2; + // } +} + +message TCPCheck { + uint32 port = 1 [(gogoproto.jsontag) = "port"]; + uint64 connect_timeout_ms = 2; + uint64 interval_ms = 3; +} + +message HTTPCheck { + uint32 port = 1 [(gogoproto.jsontag) = "port"]; + uint64 request_timeout_ms = 2; + string path = 3 [(gogoproto.jsontag) = "path"]; + uint64 interval_ms = 4; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go new file mode 100644 index 00000000..7a920ce9 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go @@ -0,0 +1,824 @@ +package models + +import ( + bytes "bytes" + "encoding/json" + "errors" + "net/url" + "regexp" + "time" + + 
"code.cloudfoundry.org/bbs/format" +) + +const PreloadedRootFSScheme = "preloaded" +const PreloadedOCIRootFSScheme = "preloaded+layer" + +const volumeMountedFilesMaxAllowedSize = 1 * 1024 * 1024 // 1MB in bytes + +var processGuidPattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +type DesiredLRPChange struct { + Before *DesiredLRP + After *DesiredLRP +} + +type DesiredLRPFilter struct { + Domain string + ProcessGuids []string +} + +func PreloadedRootFS(stack string) string { + return (&url.URL{ + Scheme: PreloadedRootFSScheme, + Opaque: stack, + }).String() +} + +func NewDesiredLRP(schedInfo DesiredLRPSchedulingInfo, runInfo DesiredLRPRunInfo, metricTags map[string]*MetricTagValue) DesiredLRP { + environmentVariables := make([]*EnvironmentVariable, len(runInfo.EnvironmentVariables)) + for i := range runInfo.EnvironmentVariables { + environmentVariables[i] = &runInfo.EnvironmentVariables[i] + } + + volumeMountedFiles := make([]*File, len(runInfo.VolumeMountedFiles)) + copy(volumeMountedFiles, runInfo.VolumeMountedFiles) + + egressRules := make([]*SecurityGroupRule, len(runInfo.EgressRules)) + for i := range runInfo.EgressRules { + egressRules[i] = &runInfo.EgressRules[i] + } + + return DesiredLRP{ + ProcessGuid: schedInfo.ProcessGuid, + Domain: schedInfo.Domain, + LogGuid: schedInfo.LogGuid, + MemoryMb: schedInfo.MemoryMb, + DiskMb: schedInfo.DiskMb, + MaxPids: schedInfo.MaxPids, + RootFs: schedInfo.RootFs, + Instances: schedInfo.Instances, + Annotation: schedInfo.Annotation, + Routes: &schedInfo.Routes, + ModificationTag: &schedInfo.ModificationTag, + EnvironmentVariables: environmentVariables, + CachedDependencies: runInfo.CachedDependencies, + Setup: runInfo.Setup, + Action: runInfo.Action, + Monitor: runInfo.Monitor, + StartTimeoutMs: runInfo.StartTimeoutMs, + Privileged: runInfo.Privileged, + CpuWeight: runInfo.CpuWeight, + Ports: runInfo.Ports, + EgressRules: egressRules, + LogSource: runInfo.LogSource, + MetricsGuid: runInfo.MetricsGuid, + LegacyDownloadUser: 
runInfo.LegacyDownloadUser, + TrustedSystemCertificatesPath: runInfo.TrustedSystemCertificatesPath, + VolumeMounts: runInfo.VolumeMounts, + Network: runInfo.Network, + PlacementTags: schedInfo.PlacementTags, + CertificateProperties: runInfo.CertificateProperties, + ImageUsername: runInfo.ImageUsername, + ImagePassword: runInfo.ImagePassword, + CheckDefinition: runInfo.CheckDefinition, + ImageLayers: runInfo.ImageLayers, + MetricTags: metricTags, + Sidecars: runInfo.Sidecars, + LogRateLimit: runInfo.LogRateLimit, + VolumeMountedFiles: volumeMountedFiles, + } +} + +func (desiredLRP *DesiredLRP) AddRunInfo(runInfo DesiredLRPRunInfo) { + environmentVariables := make([]*EnvironmentVariable, len(runInfo.EnvironmentVariables)) + for i := range runInfo.EnvironmentVariables { + environmentVariables[i] = &runInfo.EnvironmentVariables[i] + } + + volumeMountedFiles := make([]*File, len(runInfo.VolumeMountedFiles)) + copy(volumeMountedFiles, runInfo.VolumeMountedFiles) + + egressRules := make([]*SecurityGroupRule, len(runInfo.EgressRules)) + for i := range runInfo.EgressRules { + egressRules[i] = &runInfo.EgressRules[i] + } + + desiredLRP.EnvironmentVariables = environmentVariables + desiredLRP.CachedDependencies = runInfo.CachedDependencies + desiredLRP.Setup = runInfo.Setup + desiredLRP.Action = runInfo.Action + desiredLRP.Monitor = runInfo.Monitor + desiredLRP.StartTimeoutMs = runInfo.StartTimeoutMs + desiredLRP.Privileged = runInfo.Privileged + desiredLRP.CpuWeight = runInfo.CpuWeight + desiredLRP.Ports = runInfo.Ports + desiredLRP.EgressRules = egressRules + desiredLRP.LogSource = runInfo.LogSource + desiredLRP.MetricsGuid = runInfo.MetricsGuid + desiredLRP.LegacyDownloadUser = runInfo.LegacyDownloadUser + desiredLRP.TrustedSystemCertificatesPath = runInfo.TrustedSystemCertificatesPath + desiredLRP.VolumeMounts = runInfo.VolumeMounts + desiredLRP.Network = runInfo.Network + desiredLRP.CheckDefinition = runInfo.CheckDefinition + desiredLRP.VolumeMountedFiles = 
volumeMountedFiles +} + +func (*DesiredLRP) Version() format.Version { + return format.V3 +} + +func (d *DesiredLRP) actionsFromCachedDependencies() []ActionInterface { + actions := make([]ActionInterface, len(d.CachedDependencies)) + for i := range d.CachedDependencies { + cacheDependency := d.CachedDependencies[i] + actions[i] = &DownloadAction{ + Artifact: cacheDependency.Name, + From: cacheDependency.From, + To: cacheDependency.To, + CacheKey: cacheDependency.CacheKey, + LogSource: cacheDependency.LogSource, + User: d.LegacyDownloadUser, + } + } + return actions +} + +func newDesiredLRPWithCachedDependenciesAsSetupActions(d *DesiredLRP) *DesiredLRP { + d = d.Copy() + if len(d.CachedDependencies) > 0 { + + cachedDownloads := Parallel(d.actionsFromCachedDependencies()...) + + if d.Setup != nil { + d.Setup = WrapAction(Serial(cachedDownloads, UnwrapAction(d.Setup))) + } else { + d.Setup = WrapAction(Serial(cachedDownloads)) + } + d.CachedDependencies = nil + } + + return d +} + +func downgradeDesiredLRPV2ToV1(d *DesiredLRP) *DesiredLRP { + return d +} + +func downgradeDesiredLRPV1ToV0(d *DesiredLRP) *DesiredLRP { + d.Action = d.Action.SetDeprecatedTimeoutNs() + d.Setup = d.Setup.SetDeprecatedTimeoutNs() + d.Monitor = d.Monitor.SetDeprecatedTimeoutNs() + d.DeprecatedStartTimeoutS = uint32(d.StartTimeoutMs) / 1000 + return newDesiredLRPWithCachedDependenciesAsSetupActions(d) +} + +func downgradeDesiredLRPV3ToV2(d *DesiredLRP) *DesiredLRP { + layers := ImageLayers(d.ImageLayers) + + d.CachedDependencies = append(layers.ToCachedDependencies(), d.CachedDependencies...) 
+ d.Setup = layers.ToDownloadActions(d.LegacyDownloadUser, d.Setup) + d.ImageLayers = nil + + return d +} + +var downgrades = []func(*DesiredLRP) *DesiredLRP{ + downgradeDesiredLRPV1ToV0, + downgradeDesiredLRPV2ToV1, + downgradeDesiredLRPV3ToV2, +} + +func (d *DesiredLRP) VersionDownTo(v format.Version) *DesiredLRP { + versionedLRP := d.Copy() + + for version := d.Version(); version > v; version-- { + versionedLRP = downgrades[version-1](versionedLRP) + } + + return versionedLRP +} + +func (d *DesiredLRP) PopulateMetricsGuid() *DesiredLRP { + sourceId, sourceIDIsSet := d.MetricTags["source_id"] + switch { + case sourceIDIsSet && d.MetricsGuid == "": + d.MetricsGuid = sourceId.Static + case !sourceIDIsSet && d.MetricsGuid != "": + if d.MetricTags == nil { + d.MetricTags = make(map[string]*MetricTagValue) + } + d.MetricTags["source_id"] = &MetricTagValue{ + Static: d.MetricsGuid, + } + } + return d +} + +func (d *DesiredLRP) DesiredLRPKey() DesiredLRPKey { + return NewDesiredLRPKey(d.ProcessGuid, d.Domain, d.LogGuid) +} + +func (d *DesiredLRP) DesiredLRPResource() DesiredLRPResource { + return NewDesiredLRPResource(d.MemoryMb, d.DiskMb, d.MaxPids, d.RootFs) +} + +func (d *DesiredLRP) DesiredLRPSchedulingInfo() DesiredLRPSchedulingInfo { + var routes Routes + if d.Routes != nil { + routes = *d.Routes + } + var modificationTag ModificationTag + if d.ModificationTag != nil { + modificationTag = *d.ModificationTag + } + + var volumePlacement VolumePlacement + volumePlacement.DriverNames = []string{} + for _, mount := range d.VolumeMounts { + volumePlacement.DriverNames = append(volumePlacement.DriverNames, mount.Driver) + } + + return NewDesiredLRPSchedulingInfo( + d.DesiredLRPKey(), + d.Annotation, + d.Instances, + d.DesiredLRPResource(), + routes, + modificationTag, + &volumePlacement, + d.PlacementTags, + ) +} + +func (d *DesiredLRP) DesiredLRPRoutingInfo() DesiredLRP { + var routes Routes + if d.Routes != nil { + routes = *d.Routes + } + + var modificationTag 
ModificationTag + if d.ModificationTag != nil { + modificationTag = *d.ModificationTag + } + + return NewDesiredLRPRoutingInfo( + d.DesiredLRPKey(), + d.Instances, + &routes, + &modificationTag, + d.MetricTags, + ) +} + +func (d *DesiredLRP) DesiredLRPRunInfo(createdAt time.Time) DesiredLRPRunInfo { + environmentVariables := make([]EnvironmentVariable, len(d.EnvironmentVariables)) + for i := range d.EnvironmentVariables { + environmentVariables[i] = *d.EnvironmentVariables[i] + } + + volumeMountedFiles := make([]*File, len(d.VolumeMountedFiles)) + copy(volumeMountedFiles, d.VolumeMountedFiles) + + egressRules := make([]SecurityGroupRule, len(d.EgressRules)) + for i := range d.EgressRules { + egressRules[i] = *d.EgressRules[i] + } + + return NewDesiredLRPRunInfo( + d.DesiredLRPKey(), + createdAt, + environmentVariables, + d.CachedDependencies, + d.Setup, + d.Action, + d.Monitor, + d.StartTimeoutMs, + d.Privileged, + d.CpuWeight, + d.Ports, + egressRules, + d.LogSource, + d.MetricsGuid, + d.LegacyDownloadUser, + d.TrustedSystemCertificatesPath, + d.VolumeMounts, + d.Network, + d.CertificateProperties, + d.ImageUsername, + d.ImagePassword, + d.CheckDefinition, + d.ImageLayers, + d.Sidecars, + d.LogRateLimit, + volumeMountedFiles, + ) +} + +func (d *DesiredLRP) Copy() *DesiredLRP { + newDesired := *d + return &newDesired +} + +func (desired DesiredLRP) Validate() error { + var validationError ValidationError + + if len(desired.VolumeMountedFiles) > 0 { + err := validateVolumeMountedFiles(desired.VolumeMountedFiles) + if err != nil { + validationError = validationError.Append(ErrInvalidField{"volumeMountedFiles"}) + } + } + + if desired.GetDomain() == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if desired.GetRootFs() == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } + + rootFSURL, err := url.Parse(desired.GetRootFs()) + if err != nil || rootFSURL.Scheme == "" { + validationError = 
validationError.Append(ErrInvalidField{"rootfs"}) + } + + if desired.GetInstances() < 0 { + validationError = validationError.Append(ErrInvalidField{"instances"}) + } + + if desired.GetMemoryMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if desired.GetDiskMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if limit := desired.GetLogRateLimit(); limit != nil { + if limit.GetBytesPerSecond() < -1 { + validationError = validationError.Append(ErrInvalidField{"log_rate_limit_bytes_per_second"}) + } + } + + if len(desired.GetAnnotation()) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + if desired.GetMaxPids() < 0 { + validationError = validationError.Append(ErrInvalidField{"max_pids"}) + } + + totalRoutesLength := 0 + if desired.Routes != nil { + for _, value := range *desired.Routes { + totalRoutesLength += len(*value) + if totalRoutesLength > maximumRouteLength { + validationError = validationError.Append(ErrInvalidField{"routes"}) + break + } + } + } + + if desired.MetricTags == nil { + validationError = validationError.Append(ErrInvalidField{"metric_tags"}) + } else { + err := validateMetricTags(desired.MetricTags, desired.GetMetricsGuid()) + if err != nil { + validationError = validationError.Append(ErrInvalidField{"metric_tags"}) + validationError = validationError.Append(err) + } + } + + runInfoErrors := desired.DesiredLRPRunInfo(time.Now()).Validate() + if runInfoErrors != nil { + validationError = validationError.Append(runInfoErrors) + } + + return validationError.ToError() +} + +func (desired *DesiredLRPUpdate) Validate() error { + var validationError ValidationError + + if desired.GetInstances() < 0 { + validationError = validationError.Append(ErrInvalidField{"instances"}) + } + + if len(desired.GetAnnotation()) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } 
+ + totalRoutesLength := 0 + if desired.Routes != nil { + for _, value := range *desired.Routes { + totalRoutesLength += len(*value) + if totalRoutesLength > maximumRouteLength { + validationError = validationError.Append(ErrInvalidField{"routes"}) + break + } + } + } + + err := validateMetricTags(desired.MetricTags, "") + if err != nil { + validationError = validationError.Append(ErrInvalidField{"metric_tags"}) + validationError = validationError.Append(err) + } + + return validationError.ToError() +} + +func (desired *DesiredLRPUpdate) SetInstances(instances int32) { + desired.OptionalInstances = &DesiredLRPUpdate_Instances{ + Instances: instances, + } +} + +func (desired DesiredLRPUpdate) InstancesExists() bool { + _, ok := desired.GetOptionalInstances().(*DesiredLRPUpdate_Instances) + return ok +} + +func (desired *DesiredLRPUpdate) SetAnnotation(annotation string) { + desired.OptionalAnnotation = &DesiredLRPUpdate_Annotation{ + Annotation: annotation, + } +} + +func (desired DesiredLRPUpdate) AnnotationExists() bool { + _, ok := desired.GetOptionalAnnotation().(*DesiredLRPUpdate_Annotation) + return ok +} + +func (desired DesiredLRPUpdate) IsRoutesGroupUpdated(routes *Routes, routerGroup string) bool { + if desired.Routes == nil { + return false + } + + if routes == nil { + return true + } + + desiredRoutes, desiredRoutesPresent := (*desired.Routes)[routerGroup] + requestRoutes, requestRoutesPresent := (*routes)[routerGroup] + if desiredRoutesPresent != requestRoutesPresent { + return true + } + + if desiredRoutesPresent && requestRoutesPresent { + return !bytes.Equal(*desiredRoutes, *requestRoutes) + } + + return true +} + +func (desired DesiredLRPUpdate) IsMetricTagsUpdated(existingTags map[string]*MetricTagValue) bool { + if desired.MetricTags == nil { + return false + } + if len(desired.MetricTags) != len(existingTags) { + return true + } + for k, v := range existingTags { + updateTag, ok := desired.MetricTags[k] + if !ok { + return true + } + if 
updateTag.Static != v.Static || updateTag.Dynamic != v.Dynamic { + return true + } + } + return false +} + +type internalDesiredLRPUpdate struct { + Instances *int32 `json:"instances,omitempty"` + Routes *Routes `json:"routes,omitempty"` + Annotation *string `json:"annotation,omitempty"` + MetricTags map[string]*MetricTagValue `json:"metric_tags,omitempty"` +} + +func (desired *DesiredLRPUpdate) UnmarshalJSON(data []byte) error { + var update internalDesiredLRPUpdate + if err := json.Unmarshal(data, &update); err != nil { + return err + } + + if update.Instances != nil { + desired.SetInstances(*update.Instances) + } + desired.Routes = update.Routes + if update.Annotation != nil { + desired.SetAnnotation(*update.Annotation) + } + desired.MetricTags = update.MetricTags + + return nil +} + +func (desired DesiredLRPUpdate) MarshalJSON() ([]byte, error) { + var update internalDesiredLRPUpdate + if desired.InstancesExists() { + i := desired.GetInstances() + update.Instances = &i + } + update.Routes = desired.Routes + if desired.AnnotationExists() { + a := desired.GetAnnotation() + update.Annotation = &a + } + update.MetricTags = desired.MetricTags + return json.Marshal(update) +} + +func NewDesiredLRPKey(processGuid, domain, logGuid string) DesiredLRPKey { + return DesiredLRPKey{ + ProcessGuid: processGuid, + Domain: domain, + LogGuid: logGuid, + } +} + +func (key DesiredLRPKey) Validate() error { + var validationError ValidationError + if key.GetDomain() == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if !processGuidPattern.MatchString(key.GetProcessGuid()) { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + return validationError.ToError() +} + +func NewDesiredLRPSchedulingInfo( + key DesiredLRPKey, + annotation string, + instances int32, + resource DesiredLRPResource, + routes Routes, + modTag ModificationTag, + volumePlacement *VolumePlacement, + placementTags []string, +) 
DesiredLRPSchedulingInfo { + return DesiredLRPSchedulingInfo{ + DesiredLRPKey: key, + Annotation: annotation, + Instances: instances, + DesiredLRPResource: resource, + Routes: routes, + ModificationTag: modTag, + VolumePlacement: volumePlacement, + PlacementTags: placementTags, + } +} + +func NewDesiredLRPRoutingInfo( + key DesiredLRPKey, + instances int32, + routes *Routes, + modTag *ModificationTag, + metrTags map[string]*MetricTagValue, +) DesiredLRP { + return DesiredLRP{ + ProcessGuid: key.ProcessGuid, + Domain: key.Domain, + LogGuid: key.LogGuid, + Instances: instances, + Routes: routes, + ModificationTag: modTag, + MetricTags: metrTags, + } +} + +func (s *DesiredLRPSchedulingInfo) ApplyUpdate(update *DesiredLRPUpdate) { + if update.InstancesExists() { + s.Instances = update.GetInstances() + } + if update.Routes != nil { + s.Routes = *update.Routes + } + if update.AnnotationExists() { + s.Annotation = update.GetAnnotation() + } + s.ModificationTag.Increment() +} + +func (*DesiredLRPSchedulingInfo) Version() format.Version { + return format.V0 +} + +func (s DesiredLRPSchedulingInfo) Validate() error { + var validationError ValidationError + + validationError = validationError.Check(s.DesiredLRPKey, s.DesiredLRPResource, s.Routes) + + if s.GetInstances() < 0 { + validationError = validationError.Append(ErrInvalidField{"instances"}) + } + + if len(s.GetAnnotation()) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + return validationError.ToError() +} + +func NewDesiredLRPResource(memoryMb, diskMb, maxPids int32, rootFs string) DesiredLRPResource { + return DesiredLRPResource{ + MemoryMb: memoryMb, + DiskMb: diskMb, + MaxPids: maxPids, + RootFs: rootFs, + } +} + +func (resource DesiredLRPResource) Validate() error { + var validationError ValidationError + + rootFSURL, err := url.Parse(resource.GetRootFs()) + if err != nil || rootFSURL.Scheme == "" { + validationError = 
validationError.Append(ErrInvalidField{"rootfs"}) + } + + if resource.GetMemoryMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if resource.GetDiskMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if resource.GetMaxPids() < 0 { + validationError = validationError.Append(ErrInvalidField{"max_pids"}) + } + + return validationError.ToError() +} + +func NewDesiredLRPRunInfo( + key DesiredLRPKey, + createdAt time.Time, + envVars []EnvironmentVariable, + cacheDeps []*CachedDependency, + setup, + action, + monitor *Action, + startTimeoutMs int64, + privileged bool, + cpuWeight uint32, + ports []uint32, + egressRules []SecurityGroupRule, + logSource, + metricsGuid string, + legacyDownloadUser string, + trustedSystemCertificatesPath string, + volumeMounts []*VolumeMount, + network *Network, + certificateProperties *CertificateProperties, + imageUsername, imagePassword string, + checkDefinition *CheckDefinition, + imageLayers []*ImageLayer, + sidecars []*Sidecar, + logRateLimit *LogRateLimit, + volumeMountedFiles []*File, +) DesiredLRPRunInfo { + return DesiredLRPRunInfo{ + DesiredLRPKey: key, + CreatedAt: createdAt.UnixNano(), + EnvironmentVariables: envVars, + CachedDependencies: cacheDeps, + Setup: setup, + Action: action, + Monitor: monitor, + StartTimeoutMs: startTimeoutMs, + Privileged: privileged, + CpuWeight: cpuWeight, + Ports: ports, + EgressRules: egressRules, + LogSource: logSource, + MetricsGuid: metricsGuid, + LegacyDownloadUser: legacyDownloadUser, + TrustedSystemCertificatesPath: trustedSystemCertificatesPath, + VolumeMounts: volumeMounts, + Network: network, + CertificateProperties: certificateProperties, + ImageUsername: imageUsername, + ImagePassword: imagePassword, + CheckDefinition: checkDefinition, + ImageLayers: imageLayers, + Sidecars: sidecars, + LogRateLimit: logRateLimit, + VolumeMountedFiles: volumeMountedFiles, + } +} + +func (runInfo DesiredLRPRunInfo) Validate() 
error { + var validationError ValidationError + + validationError = validationError.Check(runInfo.DesiredLRPKey) + + if len(runInfo.VolumeMountedFiles) > 0 { + err := validateVolumeMountedFiles(runInfo.VolumeMountedFiles) + if err != nil { + validationError = validationError.Append(ErrInvalidField{"volumeMountedFiles"}) + } + } + + if runInfo.Setup != nil { + if err := runInfo.Setup.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"setup"}) + validationError = validationError.Append(err) + } + } + + if runInfo.Action == nil { + validationError = validationError.Append(ErrInvalidActionType) + } else if err := runInfo.Action.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + validationError = validationError.Append(err) + } + + if runInfo.Monitor != nil { + if err := runInfo.Monitor.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"monitor"}) + validationError = validationError.Append(err) + } + } + + for _, envVar := range runInfo.EnvironmentVariables { + validationError = validationError.Check(envVar) + } + + for _, rule := range runInfo.EgressRules { + err := rule.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"egress_rules"}) + validationError = validationError.Append(err) + } + } + + err := validateCachedDependencies(runInfo.CachedDependencies) + if err != nil { + validationError = validationError.Append(err) + } + + err = validateImageLayers(runInfo.ImageLayers, runInfo.LegacyDownloadUser) + if err != nil { + validationError = validationError.Append(err) + } + + err = validateSidecars(runInfo.Sidecars) + if err != nil { + validationError = validationError.Append(ErrInvalidField{"sidecars"}) + validationError = validationError.Append(err) + } + + for _, mount := range runInfo.VolumeMounts { + validationError = validationError.Check(mount) + } + + if runInfo.ImageUsername == "" && runInfo.ImagePassword != "" 
{ + validationError = validationError.Append(ErrInvalidField{"image_username"}) + } + + if runInfo.ImageUsername != "" && runInfo.ImagePassword == "" { + validationError = validationError.Append(ErrInvalidField{"image_password"}) + } + + if runInfo.CheckDefinition != nil { + if err := runInfo.CheckDefinition.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"check_definition"}) + validationError = validationError.Append(err) + } + } + + if limit := runInfo.LogRateLimit; limit != nil { + if limit.BytesPerSecond < -1 { + validationError = validationError.Append(ErrInvalidField{"log_rate_limit"}) + } + } + + return validationError.ToError() +} + +func (*CertificateProperties) Version() format.Version { + return format.V0 +} + +func (CertificateProperties) Validate() error { + return nil +} + +func validateVolumeMountedFiles(files []*File) error { + var totalSize int + for _, file := range files { + totalSize += len(file.Content) + if totalSize > volumeMountedFilesMaxAllowedSize { + return errors.New("total size of all file values exceeds 1MB") + } + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go new file mode 100644 index 00000000..123c9d8e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go @@ -0,0 +1,7407 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: desired_lrp.proto + +package models + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DesiredLRPSchedulingInfo struct { + DesiredLRPKey `protobuf:"bytes,1,opt,name=desired_lrp_key,json=desiredLrpKey,proto3,embedded=desired_lrp_key" json:""` + Annotation string `protobuf:"bytes,2,opt,name=annotation,proto3" json:"annotation"` + Instances int32 `protobuf:"varint,3,opt,name=instances,proto3" json:"instances"` + DesiredLRPResource `protobuf:"bytes,4,opt,name=desired_lrp_resource,json=desiredLrpResource,proto3,embedded=desired_lrp_resource" json:""` + Routes Routes `protobuf:"bytes,5,opt,name=routes,proto3,customtype=Routes" json:"routes"` + ModificationTag `protobuf:"bytes,6,opt,name=modification_tag,json=modificationTag,proto3,embedded=modification_tag" json:""` + VolumePlacement *VolumePlacement `protobuf:"bytes,7,opt,name=volume_placement,json=volumePlacement,proto3" json:"volume_placement,omitempty"` + PlacementTags []string `protobuf:"bytes,8,rep,name=PlacementTags,proto3" json:"placement_tags,omitempty"` +} + +func (m *DesiredLRPSchedulingInfo) Reset() { *m = DesiredLRPSchedulingInfo{} } +func (*DesiredLRPSchedulingInfo) ProtoMessage() {} +func (*DesiredLRPSchedulingInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{0} +} +func (m *DesiredLRPSchedulingInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPSchedulingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPSchedulingInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil + } +} +func (m *DesiredLRPSchedulingInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPSchedulingInfo.Merge(m, src) +} +func (m *DesiredLRPSchedulingInfo) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPSchedulingInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPSchedulingInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPSchedulingInfo proto.InternalMessageInfo + +func (m *DesiredLRPSchedulingInfo) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *DesiredLRPSchedulingInfo) GetInstances() int32 { + if m != nil { + return m.Instances + } + return 0 +} + +func (m *DesiredLRPSchedulingInfo) GetVolumePlacement() *VolumePlacement { + if m != nil { + return m.VolumePlacement + } + return nil +} + +func (m *DesiredLRPSchedulingInfo) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +type DesiredLRPRunInfo struct { + DesiredLRPKey `protobuf:"bytes,1,opt,name=desired_lrp_key,json=desiredLrpKey,proto3,embedded=desired_lrp_key" json:""` + EnvironmentVariables []EnvironmentVariable `protobuf:"bytes,2,rep,name=environment_variables,json=environmentVariables,proto3" json:"env"` + Setup *Action `protobuf:"bytes,3,opt,name=setup,proto3" json:"setup,omitempty"` + Action *Action `protobuf:"bytes,4,opt,name=action,proto3" json:"action,omitempty"` + Monitor *Action `protobuf:"bytes,5,opt,name=monitor,proto3" json:"monitor,omitempty"` + DeprecatedStartTimeoutS uint32 `protobuf:"varint,6,opt,name=deprecated_start_timeout_s,json=deprecatedStartTimeoutS,proto3" json:"start_timeout,omitempty"` // Deprecated: Do not use. 
+ Privileged bool `protobuf:"varint,7,opt,name=privileged,proto3" json:"privileged"` + CpuWeight uint32 `protobuf:"varint,8,opt,name=cpu_weight,json=cpuWeight,proto3" json:"cpu_weight"` + Ports []uint32 `protobuf:"varint,9,rep,name=ports,proto3" json:"ports,omitempty"` + EgressRules []SecurityGroupRule `protobuf:"bytes,10,rep,name=egress_rules,json=egressRules,proto3" json:"egress_rules"` + LogSource string `protobuf:"bytes,11,opt,name=log_source,json=logSource,proto3" json:"log_source"` + MetricsGuid string `protobuf:"bytes,12,opt,name=metrics_guid,json=metricsGuid,proto3" json:"metrics_guid"` // Deprecated: Do not use. + CreatedAt int64 `protobuf:"varint,13,opt,name=created_at,json=createdAt,proto3" json:"created_at"` + CachedDependencies []*CachedDependency `protobuf:"bytes,14,rep,name=cached_dependencies,json=cachedDependencies,proto3" json:"cached_dependencies,omitempty"` + LegacyDownloadUser string `protobuf:"bytes,15,opt,name=legacy_download_user,json=legacyDownloadUser,proto3" json:"legacy_download_user,omitempty"` // Deprecated: Do not use. 
+ TrustedSystemCertificatesPath string `protobuf:"bytes,16,opt,name=trusted_system_certificates_path,json=trustedSystemCertificatesPath,proto3" json:"trusted_system_certificates_path,omitempty"` + VolumeMounts []*VolumeMount `protobuf:"bytes,17,rep,name=volume_mounts,json=volumeMounts,proto3" json:"volume_mounts,omitempty"` + Network *Network `protobuf:"bytes,18,opt,name=network,proto3" json:"network,omitempty"` + StartTimeoutMs int64 `protobuf:"varint,19,opt,name=start_timeout_ms,json=startTimeoutMs,proto3" json:"start_timeout_ms"` + CertificateProperties *CertificateProperties `protobuf:"bytes,20,opt,name=certificate_properties,json=certificateProperties,proto3" json:"certificate_properties,omitempty"` + ImageUsername string `protobuf:"bytes,21,opt,name=image_username,json=imageUsername,proto3" json:"image_username,omitempty"` + ImagePassword string `protobuf:"bytes,22,opt,name=image_password,json=imagePassword,proto3" json:"image_password,omitempty"` + CheckDefinition *CheckDefinition `protobuf:"bytes,23,opt,name=check_definition,json=checkDefinition,proto3" json:"check_definition,omitempty"` + ImageLayers []*ImageLayer `protobuf:"bytes,24,rep,name=image_layers,json=imageLayers,proto3" json:"image_layers,omitempty"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,25,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. 
+ Sidecars []*Sidecar `protobuf:"bytes,26,rep,name=sidecars,proto3" json:"sidecars,omitempty"` + LogRateLimit *LogRateLimit `protobuf:"bytes,27,opt,name=log_rate_limit,json=logRateLimit,proto3" json:"log_rate_limit,omitempty"` + VolumeMountedFiles []*File `protobuf:"bytes,28,rep,name=volume_mounted_files,json=volumeMountedFiles,proto3" json:"volume_mounted_files"` +} + +func (m *DesiredLRPRunInfo) Reset() { *m = DesiredLRPRunInfo{} } +func (*DesiredLRPRunInfo) ProtoMessage() {} +func (*DesiredLRPRunInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{1} +} +func (m *DesiredLRPRunInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPRunInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPRunInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPRunInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPRunInfo.Merge(m, src) +} +func (m *DesiredLRPRunInfo) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPRunInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPRunInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPRunInfo proto.InternalMessageInfo + +func (m *DesiredLRPRunInfo) GetEnvironmentVariables() []EnvironmentVariable { + if m != nil { + return m.EnvironmentVariables + } + return nil +} + +func (m *DesiredLRPRunInfo) GetSetup() *Action { + if m != nil { + return m.Setup + } + return nil +} + +func (m *DesiredLRPRunInfo) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *DesiredLRPRunInfo) GetMonitor() *Action { + if m != nil { + return m.Monitor + } + return nil +} + +// Deprecated: Do not use. 
+func (m *DesiredLRPRunInfo) GetDeprecatedStartTimeoutS() uint32 { + if m != nil { + return m.DeprecatedStartTimeoutS + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *DesiredLRPRunInfo) GetCpuWeight() uint32 { + if m != nil { + return m.CpuWeight + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetPorts() []uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *DesiredLRPRunInfo) GetEgressRules() []SecurityGroupRule { + if m != nil { + return m.EgressRules + } + return nil +} + +func (m *DesiredLRPRunInfo) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +// Deprecated: Do not use. +func (m *DesiredLRPRunInfo) GetMetricsGuid() string { + if m != nil { + return m.MetricsGuid + } + return "" +} + +func (m *DesiredLRPRunInfo) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetCachedDependencies() []*CachedDependency { + if m != nil { + return m.CachedDependencies + } + return nil +} + +// Deprecated: Do not use. 
+func (m *DesiredLRPRunInfo) GetLegacyDownloadUser() string { + if m != nil { + return m.LegacyDownloadUser + } + return "" +} + +func (m *DesiredLRPRunInfo) GetTrustedSystemCertificatesPath() string { + if m != nil { + return m.TrustedSystemCertificatesPath + } + return "" +} + +func (m *DesiredLRPRunInfo) GetVolumeMounts() []*VolumeMount { + if m != nil { + return m.VolumeMounts + } + return nil +} + +func (m *DesiredLRPRunInfo) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *DesiredLRPRunInfo) GetStartTimeoutMs() int64 { + if m != nil { + return m.StartTimeoutMs + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetCertificateProperties() *CertificateProperties { + if m != nil { + return m.CertificateProperties + } + return nil +} + +func (m *DesiredLRPRunInfo) GetImageUsername() string { + if m != nil { + return m.ImageUsername + } + return "" +} + +func (m *DesiredLRPRunInfo) GetImagePassword() string { + if m != nil { + return m.ImagePassword + } + return "" +} + +func (m *DesiredLRPRunInfo) GetCheckDefinition() *CheckDefinition { + if m != nil { + return m.CheckDefinition + } + return nil +} + +func (m *DesiredLRPRunInfo) GetImageLayers() []*ImageLayer { + if m != nil { + return m.ImageLayers + } + return nil +} + +// Deprecated: Do not use. 
+func (m *DesiredLRPRunInfo) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *DesiredLRPRunInfo) GetSidecars() []*Sidecar { + if m != nil { + return m.Sidecars + } + return nil +} + +func (m *DesiredLRPRunInfo) GetLogRateLimit() *LogRateLimit { + if m != nil { + return m.LogRateLimit + } + return nil +} + +func (m *DesiredLRPRunInfo) GetVolumeMountedFiles() []*File { + if m != nil { + return m.VolumeMountedFiles + } + return nil +} + +// helper message for marshalling routes +type ProtoRoutes struct { + Routes map[string][]byte `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ProtoRoutes) Reset() { *m = ProtoRoutes{} } +func (*ProtoRoutes) ProtoMessage() {} +func (*ProtoRoutes) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{2} +} +func (m *ProtoRoutes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProtoRoutes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProtoRoutes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProtoRoutes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtoRoutes.Merge(m, src) +} +func (m *ProtoRoutes) XXX_Size() int { + return m.Size() +} +func (m *ProtoRoutes) XXX_DiscardUnknown() { + xxx_messageInfo_ProtoRoutes.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtoRoutes proto.InternalMessageInfo + +func (m *ProtoRoutes) GetRoutes() map[string][]byte { + if m != nil { + return m.Routes + } + return nil +} + +type DesiredLRPUpdate struct { + // Types that are valid to be assigned to OptionalInstances: + // + // *DesiredLRPUpdate_Instances + OptionalInstances isDesiredLRPUpdate_OptionalInstances 
`protobuf_oneof:"optional_instances"` + Routes *Routes `protobuf:"bytes,2,opt,name=routes,proto3,customtype=Routes" json:"routes,omitempty"` + // Types that are valid to be assigned to OptionalAnnotation: + // + // *DesiredLRPUpdate_Annotation + OptionalAnnotation isDesiredLRPUpdate_OptionalAnnotation `protobuf_oneof:"optional_annotation"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,4,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *DesiredLRPUpdate) Reset() { *m = DesiredLRPUpdate{} } +func (*DesiredLRPUpdate) ProtoMessage() {} +func (*DesiredLRPUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{3} +} +func (m *DesiredLRPUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPUpdate.Merge(m, src) +} +func (m *DesiredLRPUpdate) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPUpdate proto.InternalMessageInfo + +type isDesiredLRPUpdate_OptionalInstances interface { + isDesiredLRPUpdate_OptionalInstances() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} +type isDesiredLRPUpdate_OptionalAnnotation interface { + isDesiredLRPUpdate_OptionalAnnotation() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type DesiredLRPUpdate_Instances struct { + Instances int32 `protobuf:"varint,1,opt,name=instances,proto3,oneof" 
json:"instances,omitempty"` +} +type DesiredLRPUpdate_Annotation struct { + Annotation string `protobuf:"bytes,3,opt,name=annotation,proto3,oneof" json:"annotation,omitempty"` +} + +func (*DesiredLRPUpdate_Instances) isDesiredLRPUpdate_OptionalInstances() {} +func (*DesiredLRPUpdate_Annotation) isDesiredLRPUpdate_OptionalAnnotation() {} + +func (m *DesiredLRPUpdate) GetOptionalInstances() isDesiredLRPUpdate_OptionalInstances { + if m != nil { + return m.OptionalInstances + } + return nil +} +func (m *DesiredLRPUpdate) GetOptionalAnnotation() isDesiredLRPUpdate_OptionalAnnotation { + if m != nil { + return m.OptionalAnnotation + } + return nil +} + +func (m *DesiredLRPUpdate) GetInstances() int32 { + if x, ok := m.GetOptionalInstances().(*DesiredLRPUpdate_Instances); ok { + return x.Instances + } + return 0 +} + +func (m *DesiredLRPUpdate) GetAnnotation() string { + if x, ok := m.GetOptionalAnnotation().(*DesiredLRPUpdate_Annotation); ok { + return x.Annotation + } + return "" +} + +func (m *DesiredLRPUpdate) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*DesiredLRPUpdate) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*DesiredLRPUpdate_Instances)(nil), + (*DesiredLRPUpdate_Annotation)(nil), + } +} + +type DesiredLRPKey struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain"` + LogGuid string `protobuf:"bytes,3,opt,name=log_guid,json=logGuid,proto3" json:"log_guid"` +} + +func (m *DesiredLRPKey) Reset() { *m = DesiredLRPKey{} } +func (*DesiredLRPKey) ProtoMessage() {} +func (*DesiredLRPKey) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{4} +} +func (m *DesiredLRPKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPKey.Merge(m, src) +} +func (m *DesiredLRPKey) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPKey) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPKey.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPKey proto.InternalMessageInfo + +func (m *DesiredLRPKey) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *DesiredLRPKey) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *DesiredLRPKey) GetLogGuid() string { + if m != nil { + return m.LogGuid + } + return "" +} + +type DesiredLRPResource struct { + MemoryMb int32 `protobuf:"varint,1,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + DiskMb int32 `protobuf:"varint,2,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + RootFs string 
`protobuf:"bytes,3,opt,name=root_fs,json=rootFs,proto3" json:"rootfs"` + MaxPids int32 `protobuf:"varint,4,opt,name=max_pids,json=maxPids,proto3" json:"max_pids"` +} + +func (m *DesiredLRPResource) Reset() { *m = DesiredLRPResource{} } +func (*DesiredLRPResource) ProtoMessage() {} +func (*DesiredLRPResource) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{5} +} +func (m *DesiredLRPResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPResource.Merge(m, src) +} +func (m *DesiredLRPResource) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPResource) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPResource.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPResource proto.InternalMessageInfo + +func (m *DesiredLRPResource) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *DesiredLRPResource) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *DesiredLRPResource) GetRootFs() string { + if m != nil { + return m.RootFs + } + return "" +} + +func (m *DesiredLRPResource) GetMaxPids() int32 { + if m != nil { + return m.MaxPids + } + return 0 +} + +type DesiredLRP struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain"` + RootFs string `protobuf:"bytes,3,opt,name=root_fs,json=rootFs,proto3" json:"rootfs"` + Instances int32 `protobuf:"varint,4,opt,name=instances,proto3" json:"instances"` + EnvironmentVariables 
[]*EnvironmentVariable `protobuf:"bytes,5,rep,name=environment_variables,json=environmentVariables,proto3" json:"env"` + Setup *Action `protobuf:"bytes,6,opt,name=setup,proto3" json:"setup,omitempty"` + Action *Action `protobuf:"bytes,7,opt,name=action,proto3" json:"action,omitempty"` + StartTimeoutMs int64 `protobuf:"varint,27,opt,name=start_timeout_ms,json=startTimeoutMs,proto3" json:"start_timeout_ms"` + DeprecatedStartTimeoutS uint32 `protobuf:"varint,8,opt,name=deprecated_start_timeout_s,json=deprecatedStartTimeoutS,proto3" json:"deprecated_timeout_ns,omitempty"` // Deprecated: Do not use. + Monitor *Action `protobuf:"bytes,9,opt,name=monitor,proto3" json:"monitor,omitempty"` + DiskMb int32 `protobuf:"varint,10,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + MemoryMb int32 `protobuf:"varint,11,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + CpuWeight uint32 `protobuf:"varint,12,opt,name=cpu_weight,json=cpuWeight,proto3" json:"cpu_weight"` + Privileged bool `protobuf:"varint,13,opt,name=privileged,proto3" json:"privileged"` + Ports []uint32 `protobuf:"varint,14,rep,name=ports,proto3" json:"ports,omitempty"` + Routes *Routes `protobuf:"bytes,15,opt,name=routes,proto3,customtype=Routes" json:"routes,omitempty"` + LogSource string `protobuf:"bytes,16,opt,name=log_source,json=logSource,proto3" json:"log_source"` + LogGuid string `protobuf:"bytes,17,opt,name=log_guid,json=logGuid,proto3" json:"log_guid"` + MetricsGuid string `protobuf:"bytes,18,opt,name=metrics_guid,json=metricsGuid,proto3" json:"metrics_guid"` // Deprecated: Do not use. 
+ Annotation string `protobuf:"bytes,19,opt,name=annotation,proto3" json:"annotation"` + EgressRules []*SecurityGroupRule `protobuf:"bytes,20,rep,name=egress_rules,json=egressRules,proto3" json:"egress_rules,omitempty"` + ModificationTag *ModificationTag `protobuf:"bytes,21,opt,name=modification_tag,json=modificationTag,proto3" json:"modification_tag,omitempty"` + CachedDependencies []*CachedDependency `protobuf:"bytes,22,rep,name=cached_dependencies,json=cachedDependencies,proto3" json:"cached_dependencies,omitempty"` + LegacyDownloadUser string `protobuf:"bytes,23,opt,name=legacy_download_user,json=legacyDownloadUser,proto3" json:"legacy_download_user,omitempty"` // Deprecated: Do not use. + TrustedSystemCertificatesPath string `protobuf:"bytes,24,opt,name=trusted_system_certificates_path,json=trustedSystemCertificatesPath,proto3" json:"trusted_system_certificates_path,omitempty"` + VolumeMounts []*VolumeMount `protobuf:"bytes,25,rep,name=volume_mounts,json=volumeMounts,proto3" json:"volume_mounts,omitempty"` + Network *Network `protobuf:"bytes,26,opt,name=network,proto3" json:"network,omitempty"` + PlacementTags []string `protobuf:"bytes,28,rep,name=PlacementTags,proto3" json:"placement_tags,omitempty"` + MaxPids int32 `protobuf:"varint,29,opt,name=max_pids,json=maxPids,proto3" json:"max_pids"` + CertificateProperties *CertificateProperties `protobuf:"bytes,30,opt,name=certificate_properties,json=certificateProperties,proto3" json:"certificate_properties,omitempty"` + ImageUsername string `protobuf:"bytes,31,opt,name=image_username,json=imageUsername,proto3" json:"image_username,omitempty"` + ImagePassword string `protobuf:"bytes,32,opt,name=image_password,json=imagePassword,proto3" json:"image_password,omitempty"` + CheckDefinition *CheckDefinition `protobuf:"bytes,33,opt,name=check_definition,json=checkDefinition,proto3" json:"check_definition,omitempty"` + ImageLayers []*ImageLayer `protobuf:"bytes,34,rep,name=image_layers,json=imageLayers,proto3" 
json:"image_layers,omitempty"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,35,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Sidecars []*Sidecar `protobuf:"bytes,36,rep,name=sidecars,proto3" json:"sidecars,omitempty"` + LogRateLimit *LogRateLimit `protobuf:"bytes,37,opt,name=log_rate_limit,json=logRateLimit,proto3" json:"log_rate_limit,omitempty"` + VolumeMountedFiles []*File `protobuf:"bytes,38,rep,name=volume_mounted_files,json=volumeMountedFiles,proto3" json:"volume_mounted_files"` +} + +func (m *DesiredLRP) Reset() { *m = DesiredLRP{} } +func (*DesiredLRP) ProtoMessage() {} +func (*DesiredLRP) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{6} +} +func (m *DesiredLRP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRP.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRP) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRP.Merge(m, src) +} +func (m *DesiredLRP) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRP) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRP.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRP proto.InternalMessageInfo + +func (m *DesiredLRP) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *DesiredLRP) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *DesiredLRP) GetRootFs() string { + if m != nil { + return m.RootFs + } + return "" +} + +func (m *DesiredLRP) GetInstances() int32 { + if m != nil { + return m.Instances + } + return 0 +} + +func (m *DesiredLRP) GetEnvironmentVariables() []*EnvironmentVariable { + 
if m != nil { + return m.EnvironmentVariables + } + return nil +} + +func (m *DesiredLRP) GetSetup() *Action { + if m != nil { + return m.Setup + } + return nil +} + +func (m *DesiredLRP) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *DesiredLRP) GetStartTimeoutMs() int64 { + if m != nil { + return m.StartTimeoutMs + } + return 0 +} + +// Deprecated: Do not use. +func (m *DesiredLRP) GetDeprecatedStartTimeoutS() uint32 { + if m != nil { + return m.DeprecatedStartTimeoutS + } + return 0 +} + +func (m *DesiredLRP) GetMonitor() *Action { + if m != nil { + return m.Monitor + } + return nil +} + +func (m *DesiredLRP) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *DesiredLRP) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *DesiredLRP) GetCpuWeight() uint32 { + if m != nil { + return m.CpuWeight + } + return 0 +} + +func (m *DesiredLRP) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *DesiredLRP) GetPorts() []uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *DesiredLRP) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *DesiredLRP) GetLogGuid() string { + if m != nil { + return m.LogGuid + } + return "" +} + +// Deprecated: Do not use. 
+func (m *DesiredLRP) GetMetricsGuid() string { + if m != nil { + return m.MetricsGuid + } + return "" +} + +func (m *DesiredLRP) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *DesiredLRP) GetEgressRules() []*SecurityGroupRule { + if m != nil { + return m.EgressRules + } + return nil +} + +func (m *DesiredLRP) GetModificationTag() *ModificationTag { + if m != nil { + return m.ModificationTag + } + return nil +} + +func (m *DesiredLRP) GetCachedDependencies() []*CachedDependency { + if m != nil { + return m.CachedDependencies + } + return nil +} + +// Deprecated: Do not use. +func (m *DesiredLRP) GetLegacyDownloadUser() string { + if m != nil { + return m.LegacyDownloadUser + } + return "" +} + +func (m *DesiredLRP) GetTrustedSystemCertificatesPath() string { + if m != nil { + return m.TrustedSystemCertificatesPath + } + return "" +} + +func (m *DesiredLRP) GetVolumeMounts() []*VolumeMount { + if m != nil { + return m.VolumeMounts + } + return nil +} + +func (m *DesiredLRP) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *DesiredLRP) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +func (m *DesiredLRP) GetMaxPids() int32 { + if m != nil { + return m.MaxPids + } + return 0 +} + +func (m *DesiredLRP) GetCertificateProperties() *CertificateProperties { + if m != nil { + return m.CertificateProperties + } + return nil +} + +func (m *DesiredLRP) GetImageUsername() string { + if m != nil { + return m.ImageUsername + } + return "" +} + +func (m *DesiredLRP) GetImagePassword() string { + if m != nil { + return m.ImagePassword + } + return "" +} + +func (m *DesiredLRP) GetCheckDefinition() *CheckDefinition { + if m != nil { + return m.CheckDefinition + } + return nil +} + +func (m *DesiredLRP) GetImageLayers() []*ImageLayer { + if m != nil { + return m.ImageLayers + } + return nil +} + +func (m *DesiredLRP) GetMetricTags() 
map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *DesiredLRP) GetSidecars() []*Sidecar { + if m != nil { + return m.Sidecars + } + return nil +} + +func (m *DesiredLRP) GetLogRateLimit() *LogRateLimit { + if m != nil { + return m.LogRateLimit + } + return nil +} + +func (m *DesiredLRP) GetVolumeMountedFiles() []*File { + if m != nil { + return m.VolumeMountedFiles + } + return nil +} + +func init() { + proto.RegisterType((*DesiredLRPSchedulingInfo)(nil), "models.DesiredLRPSchedulingInfo") + proto.RegisterType((*DesiredLRPRunInfo)(nil), "models.DesiredLRPRunInfo") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.DesiredLRPRunInfo.MetricTagsEntry") + proto.RegisterType((*ProtoRoutes)(nil), "models.ProtoRoutes") + proto.RegisterMapType((map[string][]byte)(nil), "models.ProtoRoutes.RoutesEntry") + proto.RegisterType((*DesiredLRPUpdate)(nil), "models.DesiredLRPUpdate") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.DesiredLRPUpdate.MetricTagsEntry") + proto.RegisterType((*DesiredLRPKey)(nil), "models.DesiredLRPKey") + proto.RegisterType((*DesiredLRPResource)(nil), "models.DesiredLRPResource") + proto.RegisterType((*DesiredLRP)(nil), "models.DesiredLRP") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.DesiredLRP.MetricTagsEntry") +} + +func init() { proto.RegisterFile("desired_lrp.proto", fileDescriptor_f592e9299b63d68c) } + +var fileDescriptor_f592e9299b63d68c = []byte{ + // 1845 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x98, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xc0, 0xb9, 0xfa, 0x43, 0x8a, 0x43, 0x52, 0xa2, 0x46, 0x94, 0x34, 0xa6, 0x6d, 0x2e, 0xcb, + 0xd8, 0x29, 0xd3, 0x24, 0x0a, 0xe0, 0xa4, 0x68, 0x9a, 0x16, 0x05, 0xb2, 0x76, 0xe2, 0x18, 0x96, + 0x02, 0x61, 0x64, 0xbb, 0x6d, 0x80, 0x62, 0xb1, 0xda, 0x1d, 0xad, 0x16, 0xde, 0xdd, 0x59, 0xec, + 0xcc, 0xca, 0xe1, 0xad, 0xfd, 0x06, 0xed, 0xa9, 0x5f, 0xa1, 
0x1f, 0xa0, 0x40, 0xbf, 0x42, 0x8e, + 0x3e, 0x06, 0x3d, 0x10, 0xb1, 0x7c, 0x29, 0x78, 0xca, 0x47, 0x28, 0x66, 0xf6, 0x3f, 0x49, 0x53, + 0x52, 0x6c, 0x03, 0x39, 0x71, 0xe6, 0xbd, 0x37, 0x6f, 0xdf, 0xcc, 0xbc, 0x7d, 0xef, 0xb7, 0x04, + 0x9b, 0x16, 0x61, 0x4e, 0x48, 0x2c, 0xdd, 0x0d, 0x83, 0xbd, 0x20, 0xa4, 0x9c, 0xc2, 0xaa, 0x47, + 0x2d, 0xe2, 0xb2, 0xee, 0x87, 0xb6, 0xc3, 0x4f, 0xa3, 0xe3, 0x3d, 0x93, 0x7a, 0x1f, 0xd9, 0xd4, + 0xa6, 0x1f, 0x49, 0xf5, 0x71, 0x74, 0x22, 0x67, 0x72, 0x22, 0x47, 0xf1, 0xb2, 0x6e, 0xcb, 0x30, + 0xb9, 0x43, 0x7d, 0x96, 0x4c, 0x77, 0x4d, 0xc3, 0x3c, 0x25, 0x96, 0x6e, 0x91, 0x80, 0xf8, 0x16, + 0xf1, 0xcd, 0x51, 0xa2, 0xb8, 0x61, 0x92, 0x90, 0x3b, 0x27, 0x8e, 0x69, 0x70, 0xa2, 0x07, 0x21, + 0x0d, 0xc4, 0x94, 0xa4, 0xcb, 0xae, 0x13, 0xff, 0xcc, 0x09, 0xa9, 0xef, 0x11, 0x9f, 0xeb, 0x67, + 0x46, 0xe8, 0x18, 0xc7, 0x6e, 0xa6, 0xdc, 0xf1, 0xa8, 0x15, 0xaf, 0x74, 0xa8, 0xaf, 0x73, 0xc3, + 0x4e, 0x1f, 0xed, 0x13, 0xfe, 0x8c, 0x86, 0x4f, 0x93, 0x69, 0x87, 0x11, 0x33, 0x0a, 0x1d, 0x3e, + 0xd2, 0xed, 0x90, 0x46, 0xc9, 0xb6, 0xba, 0xf0, 0x8c, 0xba, 0x91, 0x47, 0x74, 0x8f, 0x46, 0x3e, + 0x4f, 0x1d, 0x9a, 0xa7, 0xc4, 0x7c, 0xaa, 0x5b, 0xe4, 0xc4, 0xf1, 0x1d, 0xe1, 0x34, 0x91, 0x6f, + 0x3a, 0x9e, 0x61, 0x13, 0xdd, 0x35, 0x46, 0x24, 0x4c, 0x45, 0x1e, 0xe1, 0xa1, 0x63, 0x8a, 0xa7, + 0xa6, 0xe1, 0xb4, 0x98, 0x63, 0x11, 0xd3, 0x48, 0x2d, 0x3a, 0x2e, 0xb5, 0xf5, 0x50, 0xec, 0xca, + 0x75, 0x3c, 0x27, 0x7d, 0x04, 0x38, 0x71, 0x5c, 0x12, 0x8f, 0x07, 0xff, 0x59, 0x01, 0xe8, 0x5e, + 0x7c, 0xde, 0xfb, 0xf8, 0xf0, 0x48, 0x9c, 0x4f, 0xe4, 0x3a, 0xbe, 0xfd, 0xc0, 0x3f, 0xa1, 0xf0, + 0x21, 0xd8, 0x28, 0xdc, 0x85, 0xfe, 0x94, 0x8c, 0x90, 0xd2, 0x57, 0x86, 0x8d, 0x3b, 0xdb, 0x7b, + 0xf1, 0x85, 0xec, 0xe5, 0x4b, 0x1f, 0x92, 0x91, 0xd6, 0xfc, 0x6e, 0xac, 0x56, 0x9e, 0x8f, 0x55, + 0x65, 0x32, 0x56, 0x2b, 0xb8, 0x95, 0xac, 0xdd, 0x0f, 0x83, 0x87, 0x64, 0x04, 0xf7, 0x00, 0x30, + 0x7c, 0x9f, 0x72, 0x79, 0x52, 0x68, 0xa9, 0xaf, 0x0c, 0xeb, 0xda, 0xfa, 0x64, 0xac, 0x16, 0xa4, + 
0xb8, 0x30, 0x86, 0xef, 0x83, 0xba, 0xe3, 0x33, 0x6e, 0xf8, 0x26, 0x61, 0x68, 0xb9, 0xaf, 0x0c, + 0x57, 0xb5, 0xd6, 0x64, 0xac, 0xe6, 0x42, 0x9c, 0x0f, 0xe1, 0x37, 0xa0, 0x53, 0x8c, 0x34, 0x24, + 0x8c, 0x46, 0xa1, 0x49, 0xd0, 0x8a, 0x0c, 0xb7, 0x3b, 0x1b, 0x2e, 0x4e, 0x2c, 0xa6, 0x62, 0x86, + 0x79, 0xcc, 0xa9, 0x05, 0xfc, 0x1d, 0xa8, 0x86, 0x34, 0xe2, 0x84, 0xa1, 0x55, 0xe9, 0x6d, 0x2b, + 0xf5, 0x76, 0x28, 0x4e, 0x10, 0x4b, 0x95, 0xb6, 0x2e, 0xdc, 0xfc, 0x77, 0xac, 0x56, 0xe3, 0x39, + 0x4e, 0x96, 0xc0, 0x43, 0xd0, 0x9e, 0xce, 0x10, 0x54, 0x95, 0x6e, 0x76, 0x53, 0x37, 0x07, 0x05, + 0xfd, 0x23, 0xc3, 0x9e, 0x8a, 0x68, 0xc3, 0x2b, 0xab, 0xa1, 0x06, 0xda, 0x49, 0xda, 0x04, 0xae, + 0x61, 0x12, 0x91, 0x95, 0xa8, 0x56, 0xf6, 0xf8, 0x44, 0xea, 0x0f, 0x53, 0x35, 0xde, 0x38, 0x2b, + 0x0b, 0xa0, 0x06, 0x5a, 0xd9, 0xe4, 0x91, 0x61, 0x33, 0xb4, 0xd6, 0x5f, 0x1e, 0xd6, 0xb5, 0x1b, + 0x93, 0xb1, 0x8a, 0x32, 0xaf, 0x32, 0xaf, 0x3e, 0xa0, 0x9e, 0xc3, 0x89, 0x17, 0xf0, 0x11, 0x2e, + 0x2f, 0x19, 0xbc, 0x6c, 0x81, 0xcd, 0xc2, 0x79, 0x46, 0xfe, 0x9b, 0x4f, 0x99, 0xbf, 0x80, 0xed, + 0xb9, 0xef, 0x1e, 0x5a, 0xea, 0x2f, 0x0f, 0x1b, 0x77, 0xae, 0xa7, 0x2e, 0xbf, 0xc8, 0x8d, 0x9e, + 0x24, 0x36, 0x5a, 0x43, 0x38, 0x9e, 0x8c, 0xd5, 0x65, 0xe2, 0x9f, 0xe1, 0x0e, 0x99, 0xb5, 0x60, + 0xf0, 0x16, 0x58, 0x65, 0x84, 0x47, 0x81, 0xcc, 0xae, 0xc6, 0x9d, 0xf5, 0xd4, 0xdd, 0xe7, 0xb2, + 0x6a, 0xe0, 0x58, 0x09, 0xdf, 0x05, 0xd5, 0xb8, 0x8c, 0x24, 0xc9, 0x34, 0x6d, 0x96, 0x68, 0xe1, + 0x10, 0xd4, 0x3c, 0xea, 0x3b, 0x9c, 0x86, 0x49, 0x9e, 0x4c, 0x1b, 0xa6, 0x6a, 0xf8, 0x0d, 0xe8, + 0x5a, 0x24, 0x08, 0x89, 0x28, 0x37, 0x96, 0xce, 0xb8, 0x11, 0x72, 0x9d, 0x3b, 0x1e, 0xa1, 0x11, + 0xd7, 0x99, 0xcc, 0x8e, 0x96, 0x76, 0x73, 0x32, 0x56, 0x77, 0x4b, 0xaa, 0xfc, 0x26, 0x90, 0x82, + 0x77, 0x73, 0x07, 0x47, 0xc2, 0xe8, 0x51, 0x6c, 0x73, 0x24, 0xde, 0xb2, 0x20, 0x74, 0xce, 0x1c, + 0x97, 0xd8, 0xc4, 0x92, 0x79, 0xb1, 0x16, 0xbf, 0x65, 0xb9, 0x14, 0x17, 0xc6, 0xf0, 0x43, 0x00, + 0xcc, 0x20, 0xd2, 0x9f, 0x11, 0xc7, 
0x3e, 0xe5, 0x68, 0x4d, 0x3e, 0x5b, 0xda, 0xe7, 0x52, 0x5c, + 0x37, 0x83, 0xe8, 0x8f, 0x72, 0x08, 0x11, 0x58, 0x0d, 0x68, 0xc8, 0x19, 0xaa, 0xf7, 0x97, 0x87, + 0x2d, 0x6d, 0xa9, 0x5d, 0xc1, 0xb1, 0x00, 0x6a, 0xa0, 0x49, 0xec, 0x90, 0x30, 0xa6, 0x87, 0x91, + 0xb8, 0x22, 0x20, 0xaf, 0xe8, 0x5a, 0x7a, 0x06, 0x47, 0x49, 0xfd, 0xbb, 0x2f, 0xca, 0x1f, 0x8e, + 0x5c, 0xa2, 0xad, 0x88, 0x0b, 0xc2, 0x8d, 0x78, 0x91, 0x90, 0x30, 0x11, 0x8c, 0x28, 0x58, 0xc9, + 0xbb, 0xdb, 0xc8, 0x4b, 0x44, 0x2e, 0xc5, 0x75, 0x97, 0xda, 0x47, 0xf1, 0x8b, 0xf9, 0x6b, 0xd0, + 0x8c, 0x2b, 0x20, 0xd3, 0xed, 0xc8, 0xb1, 0x50, 0x53, 0x2e, 0x80, 0x93, 0xb1, 0x5a, 0x96, 0x2b, + 0xb8, 0x91, 0xcc, 0xef, 0x47, 0x4e, 0xbc, 0xe5, 0x90, 0xc8, 0xb3, 0x37, 0x38, 0x6a, 0xf5, 0x95, + 0xe1, 0x72, 0xb2, 0xe5, 0x4c, 0x8a, 0xeb, 0xc9, 0xf8, 0x73, 0x0e, 0x1f, 0x80, 0xad, 0xe9, 0xbe, + 0xe1, 0x10, 0x86, 0xd6, 0xe5, 0xfe, 0x50, 0xba, 0xbf, 0xbb, 0xd2, 0xe4, 0x5e, 0xd6, 0x59, 0x30, + 0x34, 0xcb, 0x12, 0x87, 0x30, 0xf8, 0x09, 0xe8, 0xb8, 0xc4, 0x36, 0xcc, 0x91, 0x6e, 0xd1, 0x67, + 0xbe, 0x4b, 0x0d, 0x4b, 0x8f, 0x18, 0x09, 0xd1, 0x86, 0x0c, 0x7c, 0x09, 0x29, 0x18, 0xc6, 0xfa, + 0x7b, 0x89, 0xfa, 0x31, 0x23, 0x21, 0xbc, 0x0f, 0xfa, 0x3c, 0x8c, 0x98, 0xcc, 0x95, 0x11, 0xe3, + 0xc4, 0xd3, 0x0b, 0xed, 0x8a, 0xe9, 0x81, 0xc1, 0x4f, 0x51, 0x5b, 0x78, 0xc0, 0x37, 0x13, 0xbb, + 0x23, 0x69, 0x76, 0xb7, 0x60, 0x75, 0x68, 0xf0, 0x53, 0xf8, 0x29, 0x68, 0x15, 0x1b, 0x0e, 0x43, + 0x9b, 0x72, 0x0f, 0x5b, 0xe5, 0xb2, 0x71, 0x20, 0x74, 0xb8, 0x79, 0x96, 0x4f, 0x18, 0x7c, 0x0f, + 0xd4, 0x92, 0x7e, 0x86, 0xa0, 0xcc, 0xed, 0x8d, 0x74, 0xcd, 0xd7, 0xb1, 0x18, 0xa7, 0x7a, 0xf8, + 0x07, 0xd0, 0x2e, 0x67, 0xb4, 0xc7, 0xd0, 0x96, 0x3c, 0xe3, 0xce, 0x64, 0xac, 0xce, 0xe8, 0xf0, + 0x3a, 0x2b, 0xe4, 0xef, 0x81, 0xa8, 0xe4, 0x3b, 0xf3, 0xbb, 0x31, 0xea, 0xc8, 0x27, 0xdf, 0xcc, + 0x4e, 0x3c, 0xb7, 0x3a, 0xcc, 0x8c, 0x64, 0x56, 0x29, 0x78, 0xdb, 0x9c, 0xa7, 0x84, 0xb7, 0xc1, + 0x7a, 0xdc, 0x45, 0xc5, 0xa9, 0xfb, 0x86, 0x47, 0xd0, 0xb6, 0x3c, 0xb7, 
0x96, 0x94, 0x3e, 0x4e, + 0x84, 0xb9, 0x59, 0x60, 0x30, 0xf6, 0x8c, 0x86, 0x16, 0xda, 0x29, 0x98, 0x1d, 0x26, 0x42, 0x51, + 0x88, 0xa7, 0x7b, 0x35, 0xda, 0x2d, 0x17, 0xe2, 0xbb, 0x42, 0x7f, 0x2f, 0x53, 0xe3, 0x0d, 0xb3, + 0x2c, 0x10, 0x29, 0x5c, 0xe8, 0xeb, 0x0c, 0x21, 0x79, 0x23, 0x30, 0x5d, 0xff, 0x40, 0xe8, 0xf6, + 0x85, 0x0a, 0x37, 0x9c, 0x6c, 0xcc, 0xe0, 0xd7, 0xa0, 0x51, 0xe8, 0xfd, 0xe8, 0x9a, 0x5c, 0xf5, + 0xde, 0x9c, 0x2e, 0x17, 0x57, 0xe5, 0xbd, 0x03, 0x69, 0x2c, 0xca, 0xf6, 0x17, 0x3e, 0x0f, 0x47, + 0x32, 0xd5, 0x80, 0x97, 0x09, 0xe1, 0xfb, 0x60, 0x2d, 0x01, 0x07, 0x86, 0xba, 0xd2, 0x59, 0x76, + 0xc1, 0x47, 0xb1, 0x1c, 0x67, 0x06, 0xf0, 0x33, 0xb0, 0x5e, 0xc6, 0x0a, 0x74, 0x5d, 0xee, 0xba, + 0x93, 0x2e, 0xd9, 0xa7, 0x36, 0x36, 0x38, 0xd9, 0x17, 0x3a, 0xdc, 0x74, 0x0b, 0x33, 0xf8, 0x27, + 0xd0, 0x29, 0xa6, 0x20, 0xb1, 0x74, 0xc1, 0x22, 0x0c, 0xdd, 0x90, 0x0f, 0x6d, 0xa6, 0x1e, 0xbe, + 0x74, 0x5c, 0xa2, 0xa1, 0xc9, 0x58, 0x9d, 0x6b, 0x8d, 0x61, 0x21, 0x39, 0x89, 0x25, 0x8c, 0x59, + 0xf7, 0x31, 0xd8, 0x98, 0xda, 0x25, 0x6c, 0x83, 0xe5, 0xb4, 0xff, 0xd4, 0xb1, 0x18, 0xc2, 0x0f, + 0xc0, 0xea, 0x99, 0xe1, 0x46, 0x44, 0xe2, 0x47, 0xe3, 0xce, 0x4e, 0xd6, 0x82, 0xd3, 0x95, 0x4f, + 0x84, 0x16, 0xc7, 0x46, 0x9f, 0x2d, 0x7d, 0xaa, 0x0c, 0xfe, 0xa6, 0x80, 0x46, 0xa1, 0xcf, 0xc3, + 0xdf, 0x64, 0x30, 0xa0, 0xc8, 0x90, 0xd5, 0x39, 0x30, 0xb0, 0x17, 0xff, 0xc8, 0x20, 0x52, 0x10, + 0xe8, 0xfe, 0x16, 0x34, 0x0a, 0xe2, 0x39, 0xb1, 0x75, 0x8a, 0xb1, 0x35, 0x8b, 0x31, 0xfc, 0xb0, + 0x04, 0xda, 0xf9, 0x9d, 0x3e, 0x0e, 0x2c, 0x83, 0x13, 0xd8, 0x2b, 0xe2, 0x91, 0x70, 0xb3, 0xfa, + 0x55, 0xa5, 0x48, 0x44, 0x39, 0xb5, 0x2c, 0x2d, 0xa6, 0x16, 0x65, 0x0e, 0xb5, 0xf4, 0x4b, 0xac, + 0x26, 0xda, 0x63, 0xfd, 0x2b, 0xa5, 0x44, 0x67, 0x0f, 0xca, 0x19, 0xb8, 0x22, 0x0f, 0x63, 0x38, + 0x9b, 0x81, 0x71, 0xb4, 0xd3, 0x09, 0x58, 0x4c, 0xbe, 0xb7, 0x74, 0x73, 0x5a, 0x07, 0x40, 0x1a, + 0x88, 0x58, 0x0d, 0x57, 0xcf, 0x8e, 0x45, 0xdb, 0x06, 0x5b, 0x99, 0x34, 0xdf, 0xce, 0xe0, 0x1f, + 0x0a, 0x68, 
0x95, 0xc0, 0x04, 0x7e, 0x0c, 0x9a, 0x41, 0x48, 0x4d, 0xd1, 0xd0, 0xe2, 0x26, 0x22, + 0x6b, 0x74, 0x5b, 0x34, 0x97, 0xa2, 0x1c, 0x37, 0x92, 0x99, 0x6c, 0x2d, 0x03, 0x50, 0xb5, 0xa8, + 0x67, 0x38, 0x29, 0xdf, 0x82, 0xc9, 0x58, 0x4d, 0x24, 0x38, 0xf9, 0x85, 0xbf, 0x04, 0x6b, 0xe2, + 0xf5, 0x91, 0x4e, 0xe5, 0xc9, 0x6a, 0xcd, 0xc9, 0x58, 0xcd, 0x64, 0xb8, 0xe6, 0x52, 0x5b, 0x38, + 0x1b, 0xfc, 0x5b, 0x01, 0x70, 0x16, 0x58, 0xe1, 0xaf, 0x40, 0xdd, 0x23, 0x1e, 0x0d, 0x47, 0xba, + 0x77, 0x1c, 0x5f, 0x7c, 0xcc, 0xc5, 0x99, 0x10, 0xaf, 0xc5, 0xc3, 0x83, 0x63, 0x78, 0x0b, 0xd4, + 0x2c, 0x87, 0x3d, 0x15, 0x96, 0x4b, 0xd2, 0xb2, 0x31, 0x19, 0xab, 0xa9, 0x08, 0x57, 0xc5, 0xe0, + 0xe0, 0x18, 0xbe, 0x03, 0x6a, 0x21, 0xa5, 0x5c, 0x3f, 0x61, 0x49, 0x40, 0x32, 0x6c, 0x21, 0x3a, + 0x91, 0x29, 0x41, 0xf9, 0x97, 0x4c, 0x84, 0xed, 0x19, 0xdf, 0xea, 0x81, 0x63, 0x31, 0x09, 0x42, + 0xab, 0x71, 0xd8, 0xa9, 0x0c, 0xd7, 0x3c, 0xe3, 0xdb, 0x43, 0xc7, 0x62, 0x83, 0x7f, 0x6e, 0x02, + 0x90, 0x87, 0xfd, 0xf6, 0xce, 0xf1, 0x52, 0x51, 0x97, 0x3e, 0x22, 0x56, 0x2e, 0xf8, 0x88, 0xf8, + 0xf3, 0xab, 0x70, 0x73, 0xf5, 0x62, 0xdc, 0xac, 0x5d, 0x12, 0x35, 0xab, 0x97, 0x43, 0xcd, 0xda, + 0x42, 0xd4, 0x9c, 0xd7, 0x63, 0xaf, 0x5f, 0xa1, 0xc7, 0x1e, 0x2f, 0x04, 0xd0, 0x18, 0x02, 0x6f, + 0x4f, 0xc6, 0xaa, 0x5a, 0xb0, 0x4a, 0xf5, 0x3e, 0xbb, 0x1c, 0x88, 0x16, 0x70, 0xb8, 0xbe, 0x18, + 0x87, 0x0b, 0x49, 0x0a, 0x5e, 0x9d, 0xa4, 0xa5, 0xb4, 0x6f, 0x2c, 0x4e, 0xfb, 0x32, 0xd4, 0x36, + 0x2f, 0x82, 0xda, 0x32, 0x33, 0xb7, 0x2e, 0x64, 0xe6, 0x0c, 0x82, 0xd7, 0xa7, 0x21, 0x38, 0x2f, + 0xba, 0x1b, 0x57, 0x2f, 0xba, 0x65, 0xfa, 0x6d, 0x5f, 0x44, 0xbf, 0xc5, 0x3a, 0xb2, 0xb9, 0xa0, + 0x8e, 0xcc, 0x60, 0x32, 0xbc, 0x1c, 0x26, 0x97, 0xbf, 0xd7, 0xb7, 0x2e, 0xfc, 0x5e, 0xff, 0xfd, + 0xd4, 0x07, 0x40, 0xe7, 0x82, 0x0f, 0x80, 0x32, 0xfa, 0x6b, 0x73, 0xbe, 0x93, 0xb7, 0x17, 0x7e, + 0x27, 0xcf, 0x7e, 0x19, 0xbf, 0x82, 0xd4, 0x77, 0xde, 0x20, 0xa9, 0xef, 0xbe, 0x36, 0xa9, 0xa3, + 0x9f, 0x44, 0xea, 0xd7, 0x7e, 0x02, 0xa9, 0x77, 
0x2f, 0x20, 0xf5, 0x99, 0x3f, 0x01, 0x6e, 0x5c, + 0xf9, 0x4f, 0x80, 0x52, 0x57, 0xb8, 0xb9, 0xa0, 0x2b, 0x2c, 0xc0, 0xfa, 0xde, 0x5b, 0xc0, 0x7a, + 0xf5, 0x72, 0x58, 0xdf, 0xbf, 0x2c, 0xd6, 0xff, 0xe2, 0x35, 0xb1, 0x7e, 0x70, 0x39, 0xac, 0xbf, + 0x5b, 0x86, 0xaa, 0x77, 0xe4, 0xaa, 0xc1, 0x2c, 0x54, 0x2d, 0xc2, 0xa9, 0x12, 0xcb, 0xdf, 0xba, + 0x3a, 0xcb, 0xdf, 0x7e, 0x6d, 0x96, 0x7f, 0xf7, 0x67, 0xca, 0xf2, 0xda, 0x27, 0xcf, 0x5f, 0xf4, + 0x2a, 0xdf, 0xbf, 0xe8, 0x55, 0x7e, 0x7c, 0xd1, 0x53, 0xfe, 0x7a, 0xde, 0x53, 0xfe, 0x75, 0xde, + 0x53, 0xbe, 0x3b, 0xef, 0x29, 0xcf, 0xcf, 0x7b, 0xca, 0x0f, 0xe7, 0x3d, 0xe5, 0x7f, 0xe7, 0xbd, + 0xca, 0x8f, 0xe7, 0x3d, 0xe5, 0xef, 0x2f, 0x7b, 0x95, 0xe7, 0x2f, 0x7b, 0x95, 0xef, 0x5f, 0xf6, + 0x2a, 0xc7, 0x55, 0xf9, 0x47, 0xe9, 0xc7, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x43, 0x41, + 0x1c, 0x97, 0x16, 0x00, 0x00, +} + +func (this *DesiredLRPSchedulingInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPSchedulingInfo) + if !ok { + that2, ok := that.(DesiredLRPSchedulingInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLRPKey.Equal(&that1.DesiredLRPKey) { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if this.Instances != that1.Instances { + return false + } + if !this.DesiredLRPResource.Equal(&that1.DesiredLRPResource) { + return false + } + if !this.Routes.Equal(that1.Routes) { + return false + } + if !this.ModificationTag.Equal(&that1.ModificationTag) { + return false + } + if !this.VolumePlacement.Equal(that1.VolumePlacement) { + return false + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + return true +} +func (this *DesiredLRPRunInfo) Equal(that interface{}) 
bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPRunInfo) + if !ok { + that2, ok := that.(DesiredLRPRunInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLRPKey.Equal(&that1.DesiredLRPKey) { + return false + } + if len(this.EnvironmentVariables) != len(that1.EnvironmentVariables) { + return false + } + for i := range this.EnvironmentVariables { + if !this.EnvironmentVariables[i].Equal(&that1.EnvironmentVariables[i]) { + return false + } + } + if !this.Setup.Equal(that1.Setup) { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if !this.Monitor.Equal(that1.Monitor) { + return false + } + if this.DeprecatedStartTimeoutS != that1.DeprecatedStartTimeoutS { + return false + } + if this.Privileged != that1.Privileged { + return false + } + if this.CpuWeight != that1.CpuWeight { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if this.Ports[i] != that1.Ports[i] { + return false + } + } + if len(this.EgressRules) != len(that1.EgressRules) { + return false + } + for i := range this.EgressRules { + if !this.EgressRules[i].Equal(&that1.EgressRules[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + if this.MetricsGuid != that1.MetricsGuid { + return false + } + if this.CreatedAt != that1.CreatedAt { + return false + } + if len(this.CachedDependencies) != len(that1.CachedDependencies) { + return false + } + for i := range this.CachedDependencies { + if !this.CachedDependencies[i].Equal(that1.CachedDependencies[i]) { + return false + } + } + if this.LegacyDownloadUser != that1.LegacyDownloadUser { + return false + } + if this.TrustedSystemCertificatesPath != that1.TrustedSystemCertificatesPath { + return false + } + if len(this.VolumeMounts) != len(that1.VolumeMounts) { + return false + } + for 
i := range this.VolumeMounts { + if !this.VolumeMounts[i].Equal(that1.VolumeMounts[i]) { + return false + } + } + if !this.Network.Equal(that1.Network) { + return false + } + if this.StartTimeoutMs != that1.StartTimeoutMs { + return false + } + if !this.CertificateProperties.Equal(that1.CertificateProperties) { + return false + } + if this.ImageUsername != that1.ImageUsername { + return false + } + if this.ImagePassword != that1.ImagePassword { + return false + } + if !this.CheckDefinition.Equal(that1.CheckDefinition) { + return false + } + if len(this.ImageLayers) != len(that1.ImageLayers) { + return false + } + for i := range this.ImageLayers { + if !this.ImageLayers[i].Equal(that1.ImageLayers[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + if len(this.Sidecars) != len(that1.Sidecars) { + return false + } + for i := range this.Sidecars { + if !this.Sidecars[i].Equal(that1.Sidecars[i]) { + return false + } + } + if !this.LogRateLimit.Equal(that1.LogRateLimit) { + return false + } + if len(this.VolumeMountedFiles) != len(that1.VolumeMountedFiles) { + return false + } + for i := range this.VolumeMountedFiles { + if !this.VolumeMountedFiles[i].Equal(that1.VolumeMountedFiles[i]) { + return false + } + } + return true +} +func (this *ProtoRoutes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProtoRoutes) + if !ok { + that2, ok := that.(ProtoRoutes) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Routes) != len(that1.Routes) { + return false + } + for i := range this.Routes { + if !bytes.Equal(this.Routes[i], that1.Routes[i]) { + return false + } + } + return true +} +func (this *DesiredLRPUpdate) Equal(that interface{}) bool { + if that == 
nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPUpdate) + if !ok { + that2, ok := that.(DesiredLRPUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.OptionalInstances == nil { + if this.OptionalInstances != nil { + return false + } + } else if this.OptionalInstances == nil { + return false + } else if !this.OptionalInstances.Equal(that1.OptionalInstances) { + return false + } + if that1.Routes == nil { + if this.Routes != nil { + return false + } + } else if !this.Routes.Equal(*that1.Routes) { + return false + } + if that1.OptionalAnnotation == nil { + if this.OptionalAnnotation != nil { + return false + } + } else if this.OptionalAnnotation == nil { + return false + } else if !this.OptionalAnnotation.Equal(that1.OptionalAnnotation) { + return false + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + return true +} +func (this *DesiredLRPUpdate_Instances) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPUpdate_Instances) + if !ok { + that2, ok := that.(DesiredLRPUpdate_Instances) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Instances != that1.Instances { + return false + } + return true +} +func (this *DesiredLRPUpdate_Annotation) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPUpdate_Annotation) + if !ok { + that2, ok := that.(DesiredLRPUpdate_Annotation) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Annotation != that1.Annotation { + return false + } + return 
true +} +func (this *DesiredLRPKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPKey) + if !ok { + that2, ok := that.(DesiredLRPKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.LogGuid != that1.LogGuid { + return false + } + return true +} +func (this *DesiredLRPResource) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPResource) + if !ok { + that2, ok := that.(DesiredLRPResource) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.RootFs != that1.RootFs { + return false + } + if this.MaxPids != that1.MaxPids { + return false + } + return true +} +func (this *DesiredLRP) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRP) + if !ok { + that2, ok := that.(DesiredLRP) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.RootFs != that1.RootFs { + return false + } + if this.Instances != that1.Instances { + return false + } + if len(this.EnvironmentVariables) != len(that1.EnvironmentVariables) { + return false + } + for i := range this.EnvironmentVariables { + if !this.EnvironmentVariables[i].Equal(that1.EnvironmentVariables[i]) { + return false + } + } + if !this.Setup.Equal(that1.Setup) { + return false + } + if 
!this.Action.Equal(that1.Action) { + return false + } + if this.StartTimeoutMs != that1.StartTimeoutMs { + return false + } + if this.DeprecatedStartTimeoutS != that1.DeprecatedStartTimeoutS { + return false + } + if !this.Monitor.Equal(that1.Monitor) { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.CpuWeight != that1.CpuWeight { + return false + } + if this.Privileged != that1.Privileged { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if this.Ports[i] != that1.Ports[i] { + return false + } + } + if that1.Routes == nil { + if this.Routes != nil { + return false + } + } else if !this.Routes.Equal(*that1.Routes) { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.LogGuid != that1.LogGuid { + return false + } + if this.MetricsGuid != that1.MetricsGuid { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if len(this.EgressRules) != len(that1.EgressRules) { + return false + } + for i := range this.EgressRules { + if !this.EgressRules[i].Equal(that1.EgressRules[i]) { + return false + } + } + if !this.ModificationTag.Equal(that1.ModificationTag) { + return false + } + if len(this.CachedDependencies) != len(that1.CachedDependencies) { + return false + } + for i := range this.CachedDependencies { + if !this.CachedDependencies[i].Equal(that1.CachedDependencies[i]) { + return false + } + } + if this.LegacyDownloadUser != that1.LegacyDownloadUser { + return false + } + if this.TrustedSystemCertificatesPath != that1.TrustedSystemCertificatesPath { + return false + } + if len(this.VolumeMounts) != len(that1.VolumeMounts) { + return false + } + for i := range this.VolumeMounts { + if !this.VolumeMounts[i].Equal(that1.VolumeMounts[i]) { + return false + } + } + if !this.Network.Equal(that1.Network) { + return false + } + if len(this.PlacementTags) != 
len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + if this.MaxPids != that1.MaxPids { + return false + } + if !this.CertificateProperties.Equal(that1.CertificateProperties) { + return false + } + if this.ImageUsername != that1.ImageUsername { + return false + } + if this.ImagePassword != that1.ImagePassword { + return false + } + if !this.CheckDefinition.Equal(that1.CheckDefinition) { + return false + } + if len(this.ImageLayers) != len(that1.ImageLayers) { + return false + } + for i := range this.ImageLayers { + if !this.ImageLayers[i].Equal(that1.ImageLayers[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + if len(this.Sidecars) != len(that1.Sidecars) { + return false + } + for i := range this.Sidecars { + if !this.Sidecars[i].Equal(that1.Sidecars[i]) { + return false + } + } + if !this.LogRateLimit.Equal(that1.LogRateLimit) { + return false + } + if len(this.VolumeMountedFiles) != len(that1.VolumeMountedFiles) { + return false + } + for i := range this.VolumeMountedFiles { + if !this.VolumeMountedFiles[i].Equal(that1.VolumeMountedFiles[i]) { + return false + } + } + return true +} +func (this *DesiredLRPSchedulingInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&models.DesiredLRPSchedulingInfo{") + s = append(s, "DesiredLRPKey: "+strings.Replace(this.DesiredLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + s = append(s, "Instances: "+fmt.Sprintf("%#v", this.Instances)+",\n") + s = append(s, "DesiredLRPResource: "+strings.Replace(this.DesiredLRPResource.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Routes: "+fmt.Sprintf("%#v", this.Routes)+",\n") + s = append(s, 
"ModificationTag: "+strings.Replace(this.ModificationTag.GoString(), `&`, ``, 1)+",\n") + if this.VolumePlacement != nil { + s = append(s, "VolumePlacement: "+fmt.Sprintf("%#v", this.VolumePlacement)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPRunInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 32) + s = append(s, "&models.DesiredLRPRunInfo{") + s = append(s, "DesiredLRPKey: "+strings.Replace(this.DesiredLRPKey.GoString(), `&`, ``, 1)+",\n") + if this.EnvironmentVariables != nil { + vs := make([]EnvironmentVariable, len(this.EnvironmentVariables)) + for i := range vs { + vs[i] = this.EnvironmentVariables[i] + } + s = append(s, "EnvironmentVariables: "+fmt.Sprintf("%#v", vs)+",\n") + } + if this.Setup != nil { + s = append(s, "Setup: "+fmt.Sprintf("%#v", this.Setup)+",\n") + } + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + if this.Monitor != nil { + s = append(s, "Monitor: "+fmt.Sprintf("%#v", this.Monitor)+",\n") + } + s = append(s, "DeprecatedStartTimeoutS: "+fmt.Sprintf("%#v", this.DeprecatedStartTimeoutS)+",\n") + s = append(s, "Privileged: "+fmt.Sprintf("%#v", this.Privileged)+",\n") + s = append(s, "CpuWeight: "+fmt.Sprintf("%#v", this.CpuWeight)+",\n") + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + if this.EgressRules != nil { + vs := make([]SecurityGroupRule, len(this.EgressRules)) + for i := range vs { + vs[i] = this.EgressRules[i] + } + s = append(s, "EgressRules: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "MetricsGuid: "+fmt.Sprintf("%#v", this.MetricsGuid)+",\n") + s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n") + if this.CachedDependencies != nil { + s = append(s, "CachedDependencies: "+fmt.Sprintf("%#v", 
this.CachedDependencies)+",\n") + } + s = append(s, "LegacyDownloadUser: "+fmt.Sprintf("%#v", this.LegacyDownloadUser)+",\n") + s = append(s, "TrustedSystemCertificatesPath: "+fmt.Sprintf("%#v", this.TrustedSystemCertificatesPath)+",\n") + if this.VolumeMounts != nil { + s = append(s, "VolumeMounts: "+fmt.Sprintf("%#v", this.VolumeMounts)+",\n") + } + if this.Network != nil { + s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n") + } + s = append(s, "StartTimeoutMs: "+fmt.Sprintf("%#v", this.StartTimeoutMs)+",\n") + if this.CertificateProperties != nil { + s = append(s, "CertificateProperties: "+fmt.Sprintf("%#v", this.CertificateProperties)+",\n") + } + s = append(s, "ImageUsername: "+fmt.Sprintf("%#v", this.ImageUsername)+",\n") + s = append(s, "ImagePassword: "+fmt.Sprintf("%#v", this.ImagePassword)+",\n") + if this.CheckDefinition != nil { + s = append(s, "CheckDefinition: "+fmt.Sprintf("%#v", this.CheckDefinition)+",\n") + } + if this.ImageLayers != nil { + s = append(s, "ImageLayers: "+fmt.Sprintf("%#v", this.ImageLayers)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.Sidecars != nil { + s = append(s, "Sidecars: "+fmt.Sprintf("%#v", this.Sidecars)+",\n") + } + if this.LogRateLimit != nil { + s = append(s, "LogRateLimit: "+fmt.Sprintf("%#v", this.LogRateLimit)+",\n") + } + if this.VolumeMountedFiles != nil { + s = append(s, "VolumeMountedFiles: "+fmt.Sprintf("%#v", this.VolumeMountedFiles)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *ProtoRoutes) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ProtoRoutes{") + keysForRoutes := make([]string, 0, len(this.Routes)) + for k, _ := range this.Routes { + keysForRoutes = append(keysForRoutes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRoutes) + mapStringForRoutes := "map[string][]byte{" + for _, k := range keysForRoutes { + mapStringForRoutes += fmt.Sprintf("%#v: %#v,", k, this.Routes[k]) + } + mapStringForRoutes += "}" + if this.Routes != nil { + s = append(s, "Routes: "+mapStringForRoutes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPUpdate) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.DesiredLRPUpdate{") + if this.OptionalInstances != nil { + s = append(s, "OptionalInstances: "+fmt.Sprintf("%#v", this.OptionalInstances)+",\n") + } + s = append(s, "Routes: "+fmt.Sprintf("%#v", this.Routes)+",\n") + if this.OptionalAnnotation != nil { + s = append(s, "OptionalAnnotation: "+fmt.Sprintf("%#v", this.OptionalAnnotation)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPUpdate_Instances) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.DesiredLRPUpdate_Instances{` + + `Instances:` + fmt.Sprintf("%#v", this.Instances) + `}`}, ", ") + return s +} +func (this 
*DesiredLRPUpdate_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.DesiredLRPUpdate_Annotation{` + + `Annotation:` + fmt.Sprintf("%#v", this.Annotation) + `}`}, ", ") + return s +} +func (this *DesiredLRPKey) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.DesiredLRPKey{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "LogGuid: "+fmt.Sprintf("%#v", this.LogGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPResource) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.DesiredLRPResource{") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "RootFs: "+fmt.Sprintf("%#v", this.RootFs)+",\n") + s = append(s, "MaxPids: "+fmt.Sprintf("%#v", this.MaxPids)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRP) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 42) + s = append(s, "&models.DesiredLRP{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "RootFs: "+fmt.Sprintf("%#v", this.RootFs)+",\n") + s = append(s, "Instances: "+fmt.Sprintf("%#v", this.Instances)+",\n") + if this.EnvironmentVariables != nil { + s = append(s, "EnvironmentVariables: "+fmt.Sprintf("%#v", this.EnvironmentVariables)+",\n") + } + if this.Setup != nil { + s = append(s, "Setup: "+fmt.Sprintf("%#v", this.Setup)+",\n") + } + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "StartTimeoutMs: "+fmt.Sprintf("%#v", this.StartTimeoutMs)+",\n") + s = 
append(s, "DeprecatedStartTimeoutS: "+fmt.Sprintf("%#v", this.DeprecatedStartTimeoutS)+",\n") + if this.Monitor != nil { + s = append(s, "Monitor: "+fmt.Sprintf("%#v", this.Monitor)+",\n") + } + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "CpuWeight: "+fmt.Sprintf("%#v", this.CpuWeight)+",\n") + s = append(s, "Privileged: "+fmt.Sprintf("%#v", this.Privileged)+",\n") + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + s = append(s, "Routes: "+fmt.Sprintf("%#v", this.Routes)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "LogGuid: "+fmt.Sprintf("%#v", this.LogGuid)+",\n") + s = append(s, "MetricsGuid: "+fmt.Sprintf("%#v", this.MetricsGuid)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + if this.EgressRules != nil { + s = append(s, "EgressRules: "+fmt.Sprintf("%#v", this.EgressRules)+",\n") + } + if this.ModificationTag != nil { + s = append(s, "ModificationTag: "+fmt.Sprintf("%#v", this.ModificationTag)+",\n") + } + if this.CachedDependencies != nil { + s = append(s, "CachedDependencies: "+fmt.Sprintf("%#v", this.CachedDependencies)+",\n") + } + s = append(s, "LegacyDownloadUser: "+fmt.Sprintf("%#v", this.LegacyDownloadUser)+",\n") + s = append(s, "TrustedSystemCertificatesPath: "+fmt.Sprintf("%#v", this.TrustedSystemCertificatesPath)+",\n") + if this.VolumeMounts != nil { + s = append(s, "VolumeMounts: "+fmt.Sprintf("%#v", this.VolumeMounts)+",\n") + } + if this.Network != nil { + s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "MaxPids: "+fmt.Sprintf("%#v", this.MaxPids)+",\n") + if this.CertificateProperties != nil { + s = append(s, "CertificateProperties: "+fmt.Sprintf("%#v", this.CertificateProperties)+",\n") + } + s = append(s, "ImageUsername: 
"+fmt.Sprintf("%#v", this.ImageUsername)+",\n") + s = append(s, "ImagePassword: "+fmt.Sprintf("%#v", this.ImagePassword)+",\n") + if this.CheckDefinition != nil { + s = append(s, "CheckDefinition: "+fmt.Sprintf("%#v", this.CheckDefinition)+",\n") + } + if this.ImageLayers != nil { + s = append(s, "ImageLayers: "+fmt.Sprintf("%#v", this.ImageLayers)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.Sidecars != nil { + s = append(s, "Sidecars: "+fmt.Sprintf("%#v", this.Sidecars)+",\n") + } + if this.LogRateLimit != nil { + s = append(s, "LogRateLimit: "+fmt.Sprintf("%#v", this.LogRateLimit)+",\n") + } + if this.VolumeMountedFiles != nil { + s = append(s, "VolumeMountedFiles: "+fmt.Sprintf("%#v", this.VolumeMountedFiles)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDesiredLrp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DesiredLRPSchedulingInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPSchedulingInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPSchedulingInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.VolumePlacement != nil { + { + size, err := m.VolumePlacement.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size := m.Routes.Size() + i -= size + if _, err := m.Routes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.DesiredLRPResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Instances != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Instances)) + i-- + dAtA[i] = 0x18 + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.DesiredLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DesiredLRPRunInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPRunInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPRunInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeMountedFiles) > 0 { + for iNdEx := len(m.VolumeMountedFiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMountedFiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.LogRateLimit != nil { + { + size, err := m.LogRateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.Sidecars) > 0 { + for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.ImageLayers) > 0 { + for iNdEx := len(m.ImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.CheckDefinition != nil { + { + size, err := 
m.CheckDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.ImagePassword) > 0 { + i -= len(m.ImagePassword) + copy(dAtA[i:], m.ImagePassword) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImagePassword))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.ImageUsername) > 0 { + i -= len(m.ImageUsername) + copy(dAtA[i:], m.ImageUsername) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImageUsername))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CertificateProperties != nil { + { + size, err := m.CertificateProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.StartTimeoutMs != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.StartTimeoutMs)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if len(m.TrustedSystemCertificatesPath) > 0 { + i -= len(m.TrustedSystemCertificatesPath) + copy(dAtA[i:], m.TrustedSystemCertificatesPath) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.TrustedSystemCertificatesPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.LegacyDownloadUser) > 0 { + i -= len(m.LegacyDownloadUser) + copy(dAtA[i:], m.LegacyDownloadUser) + i = 
encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LegacyDownloadUser))) + i-- + dAtA[i] = 0x7a + } + if len(m.CachedDependencies) > 0 { + for iNdEx := len(m.CachedDependencies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CachedDependencies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if m.CreatedAt != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x68 + } + if len(m.MetricsGuid) > 0 { + i -= len(m.MetricsGuid) + copy(dAtA[i:], m.MetricsGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.MetricsGuid))) + i-- + dAtA[i] = 0x62 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x5a + } + if len(m.EgressRules) > 0 { + for iNdEx := len(m.EgressRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EgressRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x48 + } + } + if m.CpuWeight != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.CpuWeight)) + i-- + dAtA[i] = 0x40 + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.DeprecatedStartTimeoutS != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DeprecatedStartTimeoutS)) + i-- + dAtA[i] = 0x30 + } + if m.Monitor != nil { + { + size, err := m.Monitor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Action != nil { + { + size, err := 
m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Setup != nil { + { + size, err := m.Setup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.EnvironmentVariables) > 0 { + for iNdEx := len(m.EnvironmentVariables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvironmentVariables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.DesiredLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProtoRoutes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtoRoutes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProtoRoutes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Routes) > 0 { + for k := range m.Routes { + v := m.Routes[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.OptionalAnnotation != nil { + { + size := m.OptionalAnnotation.Size() + i -= size + if _, err := m.OptionalAnnotation.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Routes != nil { + { + size := m.Routes.Size() + i -= size + if _, err := m.Routes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.OptionalInstances != nil { + { + size := m.OptionalInstances.Size() + i -= size + if _, err := m.OptionalInstances.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPUpdate_Instances) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPUpdate_Instances) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Instances)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *DesiredLRPUpdate_Annotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *DesiredLRPUpdate_Annotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *DesiredLRPKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogGuid) > 0 { + i -= len(m.LogGuid) + copy(dAtA[i:], m.LogGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogGuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x12 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxPids != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MaxPids)) + i-- + dAtA[i] = 0x20 + } + if len(m.RootFs) > 0 { + i -= len(m.RootFs) + copy(dAtA[i:], m.RootFs) + i = encodeVarintDesiredLrp(dAtA, i, 
uint64(len(m.RootFs))) + i-- + dAtA[i] = 0x1a + } + if m.DiskMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x10 + } + if m.MemoryMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeMountedFiles) > 0 { + for iNdEx := len(m.VolumeMountedFiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMountedFiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + } + if m.LogRateLimit != nil { + { + size, err := m.LogRateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if len(m.Sidecars) > 0 { + for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i 
= encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + } + if len(m.ImageLayers) > 0 { + for iNdEx := len(m.ImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if m.CheckDefinition != nil { + { + size, err := m.CheckDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.ImagePassword) > 0 { + i -= len(m.ImagePassword) + copy(dAtA[i:], m.ImagePassword) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImagePassword))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if len(m.ImageUsername) > 0 { + i -= len(m.ImageUsername) + copy(dAtA[i:], m.ImageUsername) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImageUsername))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.CertificateProperties != nil { + { + size, err := m.CertificateProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.MaxPids != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MaxPids)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.StartTimeoutMs != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.StartTimeoutMs)) + i-- + dAtA[i] = 0x1 + i-- + 
dAtA[i] = 0xd8 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.TrustedSystemCertificatesPath) > 0 { + i -= len(m.TrustedSystemCertificatesPath) + copy(dAtA[i:], m.TrustedSystemCertificatesPath) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.TrustedSystemCertificatesPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.LegacyDownloadUser) > 0 { + i -= len(m.LegacyDownloadUser) + copy(dAtA[i:], m.LegacyDownloadUser) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LegacyDownloadUser))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.CachedDependencies) > 0 { + for iNdEx := len(m.CachedDependencies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CachedDependencies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.ModificationTag != nil { + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if len(m.EgressRules) > 0 { + for iNdEx := len(m.EgressRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EgressRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + 
} + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.MetricsGuid) > 0 { + i -= len(m.MetricsGuid) + copy(dAtA[i:], m.MetricsGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.MetricsGuid))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.LogGuid) > 0 { + i -= len(m.LogGuid) + copy(dAtA[i:], m.LogGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogGuid))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Routes != nil { + { + size := m.Routes.Size() + i -= size + if _, err := m.Routes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x70 + } + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } + if m.CpuWeight != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.CpuWeight)) + i-- + dAtA[i] = 0x60 + } + if m.MemoryMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x58 + } + if m.DiskMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x50 + } + if m.Monitor != nil { + { + size, err := m.Monitor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.DeprecatedStartTimeoutS != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DeprecatedStartTimeoutS)) + i-- + dAtA[i] = 0x40 + } + if m.Action != 
nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Setup != nil { + { + size, err := m.Setup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.EnvironmentVariables) > 0 { + for iNdEx := len(m.EnvironmentVariables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvironmentVariables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Instances != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Instances)) + i-- + dAtA[i] = 0x20 + } + if len(m.RootFs) > 0 { + i -= len(m.RootFs) + copy(dAtA[i:], m.RootFs) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.RootFs))) + i-- + dAtA[i] = 0x1a + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x12 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDesiredLrp(dAtA []byte, offset int, v uint64) int { + offset -= sovDesiredLrp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DesiredLRPSchedulingInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DesiredLRPKey.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + l = len(m.Annotation) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Instances != 0 { + n += 1 + sovDesiredLrp(uint64(m.Instances)) + } + l = m.DesiredLRPResource.Size() + n += 1 + l + 
sovDesiredLrp(uint64(l)) + l = m.Routes.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + l = m.ModificationTag.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + if m.VolumePlacement != nil { + l = m.VolumePlacement.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPRunInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DesiredLRPKey.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + if len(m.EnvironmentVariables) > 0 { + for _, e := range m.EnvironmentVariables { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Setup != nil { + l = m.Setup.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.DeprecatedStartTimeoutS != 0 { + n += 1 + sovDesiredLrp(uint64(m.DeprecatedStartTimeoutS)) + } + if m.Privileged { + n += 2 + } + if m.CpuWeight != 0 { + n += 1 + sovDesiredLrp(uint64(m.CpuWeight)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovDesiredLrp(uint64(e)) + } + } + if len(m.EgressRules) > 0 { + for _, e := range m.EgressRules { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.MetricsGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovDesiredLrp(uint64(m.CreatedAt)) + } + if len(m.CachedDependencies) > 0 { + for _, e := range m.CachedDependencies { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + l = len(m.LegacyDownloadUser) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.TrustedSystemCertificatesPath) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + 
if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Network != nil { + l = m.Network.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.StartTimeoutMs != 0 { + n += 2 + sovDesiredLrp(uint64(m.StartTimeoutMs)) + } + if m.CertificateProperties != nil { + l = m.CertificateProperties.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImageUsername) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImagePassword) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.CheckDefinition != nil { + l = m.CheckDefinition.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.ImageLayers) > 0 { + for _, e := range m.ImageLayers { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDesiredLrp(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 2 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.LogRateLimit != nil { + l = m.LogRateLimit.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.VolumeMountedFiles) > 0 { + for _, e := range m.VolumeMountedFiles { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + return n +} + +func (m *ProtoRoutes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Routes) > 0 { + for k, v := range m.Routes { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovDesiredLrp(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DesiredLRPUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.OptionalInstances != nil { + n += m.OptionalInstances.Size() + } + if m.Routes != nil { + l = m.Routes.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.OptionalAnnotation != nil { + n += m.OptionalAnnotation.Size() + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDesiredLrp(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DesiredLRPUpdate_Instances) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovDesiredLrp(uint64(m.Instances)) + return n +} +func (m *DesiredLRPUpdate_Annotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Annotation) + n += 1 + l + sovDesiredLrp(uint64(l)) + return n +} +func (m *DesiredLRPKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.LogGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + return n +} + +func (m *DesiredLRPResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.MemoryMb)) + } + if m.DiskMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.DiskMb)) + } + l = len(m.RootFs) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.MaxPids != 0 { + n += 1 + sovDesiredLrp(uint64(m.MaxPids)) + } + return n +} + +func (m *DesiredLRP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.RootFs) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Instances != 0 { + n += 1 + 
sovDesiredLrp(uint64(m.Instances)) + } + if len(m.EnvironmentVariables) > 0 { + for _, e := range m.EnvironmentVariables { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Setup != nil { + l = m.Setup.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.DeprecatedStartTimeoutS != 0 { + n += 1 + sovDesiredLrp(uint64(m.DeprecatedStartTimeoutS)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.DiskMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.DiskMb)) + } + if m.MemoryMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.MemoryMb)) + } + if m.CpuWeight != 0 { + n += 1 + sovDesiredLrp(uint64(m.CpuWeight)) + } + if m.Privileged { + n += 2 + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovDesiredLrp(uint64(e)) + } + } + if m.Routes != nil { + l = m.Routes.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.LogGuid) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.MetricsGuid) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.Annotation) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.EgressRules) > 0 { + for _, e := range m.EgressRules { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.ModificationTag != nil { + l = m.ModificationTag.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.CachedDependencies) > 0 { + for _, e := range m.CachedDependencies { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + l = len(m.LegacyDownloadUser) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.TrustedSystemCertificatesPath) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + 
sovDesiredLrp(uint64(l)) + } + } + if m.Network != nil { + l = m.Network.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.StartTimeoutMs != 0 { + n += 2 + sovDesiredLrp(uint64(m.StartTimeoutMs)) + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.MaxPids != 0 { + n += 2 + sovDesiredLrp(uint64(m.MaxPids)) + } + if m.CertificateProperties != nil { + l = m.CertificateProperties.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImageUsername) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImagePassword) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.CheckDefinition != nil { + l = m.CheckDefinition.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.ImageLayers) > 0 { + for _, e := range m.ImageLayers { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDesiredLrp(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 2 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.LogRateLimit != nil { + l = m.LogRateLimit.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.VolumeMountedFiles) > 0 { + for _, e := range m.VolumeMountedFiles { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + return n +} + +func sovDesiredLrp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDesiredLrp(x uint64) (n int) { + return sovDesiredLrp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DesiredLRPSchedulingInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPSchedulingInfo{`, + `DesiredLRPKey:` + 
strings.Replace(strings.Replace(this.DesiredLRPKey.String(), "DesiredLRPKey", "DesiredLRPKey", 1), `&`, ``, 1) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `Instances:` + fmt.Sprintf("%v", this.Instances) + `,`, + `DesiredLRPResource:` + strings.Replace(strings.Replace(this.DesiredLRPResource.String(), "DesiredLRPResource", "DesiredLRPResource", 1), `&`, ``, 1) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `ModificationTag:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1), `&`, ``, 1) + `,`, + `VolumePlacement:` + strings.Replace(fmt.Sprintf("%v", this.VolumePlacement), "VolumePlacement", "VolumePlacement", 1) + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPRunInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironmentVariables := "[]EnvironmentVariable{" + for _, f := range this.EnvironmentVariables { + repeatedStringForEnvironmentVariables += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvironmentVariables += "}" + repeatedStringForEgressRules := "[]SecurityGroupRule{" + for _, f := range this.EgressRules { + repeatedStringForEgressRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEgressRules += "}" + repeatedStringForCachedDependencies := "[]*CachedDependency{" + for _, f := range this.CachedDependencies { + repeatedStringForCachedDependencies += strings.Replace(fmt.Sprintf("%v", f), "CachedDependency", "CachedDependency", 1) + "," + } + repeatedStringForCachedDependencies += "}" + repeatedStringForVolumeMounts := "[]*VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(fmt.Sprintf("%v", f), "VolumeMount", "VolumeMount", 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForImageLayers := "[]*ImageLayer{" + for _, f := range this.ImageLayers { + 
repeatedStringForImageLayers += strings.Replace(fmt.Sprintf("%v", f), "ImageLayer", "ImageLayer", 1) + "," + } + repeatedStringForImageLayers += "}" + repeatedStringForSidecars := "[]*Sidecar{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += strings.Replace(fmt.Sprintf("%v", f), "Sidecar", "Sidecar", 1) + "," + } + repeatedStringForSidecars += "}" + repeatedStringForVolumeMountedFiles := "[]*File{" + for _, f := range this.VolumeMountedFiles { + repeatedStringForVolumeMountedFiles += strings.Replace(fmt.Sprintf("%v", f), "File", "File", 1) + "," + } + repeatedStringForVolumeMountedFiles += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&DesiredLRPRunInfo{`, + `DesiredLRPKey:` + strings.Replace(strings.Replace(this.DesiredLRPKey.String(), "DesiredLRPKey", "DesiredLRPKey", 1), `&`, ``, 1) + `,`, + `EnvironmentVariables:` + repeatedStringForEnvironmentVariables + `,`, + `Setup:` + strings.Replace(fmt.Sprintf("%v", this.Setup), "Action", "Action", 1) + `,`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Action", "Action", 1) + `,`, + `DeprecatedStartTimeoutS:` + fmt.Sprintf("%v", this.DeprecatedStartTimeoutS) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `CpuWeight:` + fmt.Sprintf("%v", this.CpuWeight) + `,`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `EgressRules:` + repeatedStringForEgressRules + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `MetricsGuid:` + fmt.Sprintf("%v", 
this.MetricsGuid) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `CachedDependencies:` + repeatedStringForCachedDependencies + `,`, + `LegacyDownloadUser:` + fmt.Sprintf("%v", this.LegacyDownloadUser) + `,`, + `TrustedSystemCertificatesPath:` + fmt.Sprintf("%v", this.TrustedSystemCertificatesPath) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `StartTimeoutMs:` + fmt.Sprintf("%v", this.StartTimeoutMs) + `,`, + `CertificateProperties:` + strings.Replace(fmt.Sprintf("%v", this.CertificateProperties), "CertificateProperties", "CertificateProperties", 1) + `,`, + `ImageUsername:` + fmt.Sprintf("%v", this.ImageUsername) + `,`, + `ImagePassword:` + fmt.Sprintf("%v", this.ImagePassword) + `,`, + `CheckDefinition:` + strings.Replace(fmt.Sprintf("%v", this.CheckDefinition), "CheckDefinition", "CheckDefinition", 1) + `,`, + `ImageLayers:` + repeatedStringForImageLayers + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `LogRateLimit:` + strings.Replace(fmt.Sprintf("%v", this.LogRateLimit), "LogRateLimit", "LogRateLimit", 1) + `,`, + `VolumeMountedFiles:` + repeatedStringForVolumeMountedFiles + `,`, + `}`, + }, "") + return s +} +func (this *ProtoRoutes) String() string { + if this == nil { + return "nil" + } + keysForRoutes := make([]string, 0, len(this.Routes)) + for k, _ := range this.Routes { + keysForRoutes = append(keysForRoutes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRoutes) + mapStringForRoutes := "map[string][]byte{" + for _, k := range keysForRoutes { + mapStringForRoutes += fmt.Sprintf("%v: %v,", k, this.Routes[k]) + } + mapStringForRoutes += "}" + s := strings.Join([]string{`&ProtoRoutes{`, + `Routes:` + mapStringForRoutes + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPUpdate) String() string { + if this == nil { + return "nil" + } + 
keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&DesiredLRPUpdate{`, + `OptionalInstances:` + fmt.Sprintf("%v", this.OptionalInstances) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `OptionalAnnotation:` + fmt.Sprintf("%v", this.OptionalAnnotation) + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPUpdate_Instances) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPUpdate_Instances{`, + `Instances:` + fmt.Sprintf("%v", this.Instances) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPUpdate_Annotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPUpdate_Annotation{`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPKey{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `LogGuid:` + fmt.Sprintf("%v", this.LogGuid) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPResource{`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `RootFs:` + fmt.Sprintf("%v", this.RootFs) + `,`, + `MaxPids:` + fmt.Sprintf("%v", this.MaxPids) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRP) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironmentVariables := "[]*EnvironmentVariable{" + for _, f := range this.EnvironmentVariables { + repeatedStringForEnvironmentVariables += strings.Replace(fmt.Sprintf("%v", f), "EnvironmentVariable", "EnvironmentVariable", 1) + "," + } + repeatedStringForEnvironmentVariables += "}" + repeatedStringForEgressRules := "[]*SecurityGroupRule{" + for _, f := range this.EgressRules { + repeatedStringForEgressRules += strings.Replace(fmt.Sprintf("%v", f), "SecurityGroupRule", "SecurityGroupRule", 1) + "," + } + repeatedStringForEgressRules += "}" + repeatedStringForCachedDependencies := "[]*CachedDependency{" + for _, f := range this.CachedDependencies { + repeatedStringForCachedDependencies += strings.Replace(fmt.Sprintf("%v", f), "CachedDependency", "CachedDependency", 1) + "," + } + repeatedStringForCachedDependencies += "}" + repeatedStringForVolumeMounts := "[]*VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(fmt.Sprintf("%v", f), "VolumeMount", "VolumeMount", 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForImageLayers := "[]*ImageLayer{" + for _, f := range this.ImageLayers { + repeatedStringForImageLayers += strings.Replace(fmt.Sprintf("%v", f), "ImageLayer", "ImageLayer", 1) + "," + } + repeatedStringForImageLayers += "}" + repeatedStringForSidecars := "[]*Sidecar{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += strings.Replace(fmt.Sprintf("%v", f), "Sidecar", "Sidecar", 1) + "," + } + repeatedStringForSidecars += "}" + repeatedStringForVolumeMountedFiles := "[]*File{" + for _, f := range this.VolumeMountedFiles { + repeatedStringForVolumeMountedFiles += strings.Replace(fmt.Sprintf("%v", f), "File", "File", 1) + "," + } + repeatedStringForVolumeMountedFiles += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = 
append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&DesiredLRP{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `RootFs:` + fmt.Sprintf("%v", this.RootFs) + `,`, + `Instances:` + fmt.Sprintf("%v", this.Instances) + `,`, + `EnvironmentVariables:` + repeatedStringForEnvironmentVariables + `,`, + `Setup:` + strings.Replace(fmt.Sprintf("%v", this.Setup), "Action", "Action", 1) + `,`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `DeprecatedStartTimeoutS:` + fmt.Sprintf("%v", this.DeprecatedStartTimeoutS) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Action", "Action", 1) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `CpuWeight:` + fmt.Sprintf("%v", this.CpuWeight) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `LogGuid:` + fmt.Sprintf("%v", this.LogGuid) + `,`, + `MetricsGuid:` + fmt.Sprintf("%v", this.MetricsGuid) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `EgressRules:` + repeatedStringForEgressRules + `,`, + `ModificationTag:` + strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1) + `,`, + `CachedDependencies:` + repeatedStringForCachedDependencies + `,`, + `LegacyDownloadUser:` + fmt.Sprintf("%v", this.LegacyDownloadUser) + `,`, + `TrustedSystemCertificatesPath:` + fmt.Sprintf("%v", this.TrustedSystemCertificatesPath) + `,`, + 
`VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `StartTimeoutMs:` + fmt.Sprintf("%v", this.StartTimeoutMs) + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `MaxPids:` + fmt.Sprintf("%v", this.MaxPids) + `,`, + `CertificateProperties:` + strings.Replace(fmt.Sprintf("%v", this.CertificateProperties), "CertificateProperties", "CertificateProperties", 1) + `,`, + `ImageUsername:` + fmt.Sprintf("%v", this.ImageUsername) + `,`, + `ImagePassword:` + fmt.Sprintf("%v", this.ImagePassword) + `,`, + `CheckDefinition:` + strings.Replace(fmt.Sprintf("%v", this.CheckDefinition), "CheckDefinition", "CheckDefinition", 1) + `,`, + `ImageLayers:` + repeatedStringForImageLayers + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `LogRateLimit:` + strings.Replace(fmt.Sprintf("%v", this.LogRateLimit), "LogRateLimit", "LogRateLimit", 1) + `,`, + `VolumeMountedFiles:` + repeatedStringForVolumeMountedFiles + `,`, + `}`, + }, "") + return s +} +func valueToStringDesiredLrp(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DesiredLRPSchedulingInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfo: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DesiredLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + } + m.Instances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Instances |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLRPResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DesiredLRPResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Routes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumePlacement", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumePlacement == nil { + m.VolumePlacement = &VolumePlacement{} + } + if err := m.VolumePlacement.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPRunInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPRunInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPRunInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DesiredLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvironmentVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvironmentVariables = append(m.EnvironmentVariables, EnvironmentVariable{}) + if err := m.EnvironmentVariables[len(m.EnvironmentVariables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Setup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Setup == nil { + m.Setup = &Action{} + } + if err := m.Setup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Monitor == nil { + m.Monitor = &Action{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedStartTimeoutS", wireType) + } + m.DeprecatedStartTimeoutS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedStartTimeoutS |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuWeight", wireType) + } + m.CpuWeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuWeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return 
ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressRules = append(m.EgressRules, SecurityGroupRule{}) + if err := m.EgressRules[len(m.EgressRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachedDependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.CachedDependencies = append(m.CachedDependencies, &CachedDependency{}) + if err := m.CachedDependencies[len(m.CachedDependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyDownloadUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LegacyDownloadUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedSystemCertificatesPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustedSystemCertificatesPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeoutMs", wireType) + } + m.StartTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimeoutMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertificateProperties == nil { + m.CertificateProperties = &CertificateProperties{} + } + if err := m.CertificateProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckDefinition == nil { + m.CheckDefinition = &CheckDefinition{} + } + if err := m.CheckDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLayers = append(m.ImageLayers, &ImageLayer{}) + if err := m.ImageLayers[len(m.ImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = 
make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > 
postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sidecars = append(m.Sidecars, &Sidecar{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogRateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogRateLimit == nil { + m.LogRateLimit = &LogRateLimit{} + } + if err := m.LogRateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMountedFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMountedFiles = append(m.VolumeMountedFiles, &File{}) + if err := m.VolumeMountedFiles[len(m.VolumeMountedFiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProtoRoutes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtoRoutes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtoRoutes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.Routes == nil { + m.Routes = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Routes[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalInstances = &DesiredLRPUpdate_Instances{v} + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Routes == nil { + m.Routes = &Routes{} + } + if err := m.Routes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OptionalAnnotation = &DesiredLRPUpdate_Annotation{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
DesiredLRPResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootFs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootFs = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPids", wireType) + } + m.MaxPids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPids |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootFs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootFs = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + } + m.Instances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Instances |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvironmentVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.EnvironmentVariables = append(m.EnvironmentVariables, &EnvironmentVariable{}) + if err := m.EnvironmentVariables[len(m.EnvironmentVariables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Setup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Setup == nil { + m.Setup = &Action{} + } + if err := m.Setup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedStartTimeoutS", wireType) + } + m.DeprecatedStartTimeoutS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.DeprecatedStartTimeoutS |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &Action{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuWeight", wireType) + } + m.CpuWeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuWeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 14: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Routes == nil { + m.Routes = &Routes{} + } + if err := m.Routes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressRules = 
append(m.EgressRules, &SecurityGroupRule{}) + if err := m.EgressRules[len(m.EgressRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ModificationTag == nil { + m.ModificationTag = &ModificationTag{} + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachedDependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CachedDependencies = append(m.CachedDependencies, &CachedDependency{}) + if err := m.CachedDependencies[len(m.CachedDependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyDownloadUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LegacyDownloadUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedSystemCertificatesPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustedSystemCertificatesPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeoutMs", wireType) + } + m.StartTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimeoutMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPids", wireType) + } + m.MaxPids = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPids |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertificateProperties == nil { + m.CertificateProperties = &CertificateProperties{} + } + if err := m.CertificateProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckDefinition == nil { + m.CheckDefinition = &CheckDefinition{} + } + if err := m.CheckDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLayers = append(m.ImageLayers, &ImageLayer{}) + if err := m.ImageLayers[len(m.ImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 
35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sidecars = append(m.Sidecars, &Sidecar{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogRateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogRateLimit == nil { + m.LogRateLimit = &LogRateLimit{} + } + if err := m.LogRateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 38: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMountedFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMountedFiles = append(m.VolumeMountedFiles, &File{}) + if err := m.VolumeMountedFiles[len(m.VolumeMountedFiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDesiredLrp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDesiredLrp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDesiredLrp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDesiredLrp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDesiredLrp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDesiredLrp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDesiredLrp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto new file mode 100644 index 00000000..d079c034 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto @@ -0,0 +1,153 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actions.proto"; +import "cached_dependency.proto"; +import "certificate_properties.proto"; +import "environment_variables.proto"; +import "modification_tag.proto"; +import "network.proto"; +import "security_group.proto"; +import "volume_mount.proto"; +import "check_definition.proto"; +import "image_layer.proto"; +import "metric_tags.proto"; +import "sidecar.proto"; +import "log_rate_limit.proto"; +import "file.proto"; + +message DesiredLRPSchedulingInfo { + DesiredLRPKey desired_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", 
(gogoproto.embed) = true]; + + string annotation = 2 [(gogoproto.jsontag) = "annotation"]; + int32 instances = 3 [(gogoproto.jsontag) = "instances"]; + + DesiredLRPResource desired_lrp_resource = 4 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + ProtoRoutes routes = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "Routes"]; + ModificationTag modification_tag = 6 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + VolumePlacement volume_placement = 7; + repeated string PlacementTags = 8 [(gogoproto.jsontag) ="placement_tags,omitempty"]; +} + +message DesiredLRPRunInfo { + DesiredLRPKey desired_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + repeated EnvironmentVariable environment_variables = 2 [(gogoproto.jsontag) = "env", (gogoproto.nullable) = false]; + Action setup = 3; + Action action = 4; + Action monitor = 5; + + uint32 deprecated_start_timeout_s = 6 [(gogoproto.jsontag) = "start_timeout,omitempty", deprecated=true]; + + bool privileged = 7 [(gogoproto.jsontag) = "privileged"]; + + uint32 cpu_weight = 8 [(gogoproto.jsontag) = "cpu_weight"]; + repeated uint32 ports = 9 [packed = false]; + repeated SecurityGroupRule egress_rules = 10 [(gogoproto.nullable) = false]; + string log_source = 11 [(gogoproto.jsontag) = "log_source"]; + string metrics_guid = 12 [deprecated=true, (gogoproto.jsontag) = "metrics_guid"]; + int64 created_at = 13 [(gogoproto.jsontag) = "created_at"]; + repeated CachedDependency cached_dependencies = 14; + string legacy_download_user = 15 [deprecated=true]; + string trusted_system_certificates_path = 16; + repeated VolumeMount volume_mounts = 17; + Network network = 18; + + int64 start_timeout_ms = 19 [(gogoproto.jsontag) = "start_timeout_ms"]; + + CertificateProperties certificate_properties = 20 [(gogoproto.nullable) = true]; + + string image_username = 21; + string image_password = 22; + + CheckDefinition 
check_definition = 23; + + repeated ImageLayer image_layers = 24; + + map metric_tags = 25 [deprecated=true]; + + repeated Sidecar sidecars = 26; + LogRateLimit log_rate_limit = 27; + repeated File volume_mounted_files = 28 [(gogoproto.jsontag) = "volume_mounted_files"]; +} + +// helper message for marshalling routes +message ProtoRoutes { + map routes = 1; +} + +message DesiredLRPUpdate { + oneof optional_instances { + int32 instances = 1; + } + ProtoRoutes routes = 2 [(gogoproto.nullable) = true, (gogoproto.customtype) = "Routes"]; + oneof optional_annotation { + string annotation = 3; + } + map metric_tags = 4; +} + +message DesiredLRPKey { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + string domain = 2 [(gogoproto.jsontag) = "domain"]; + string log_guid = 3 [(gogoproto.jsontag) = "log_guid"]; +} + +message DesiredLRPResource { + int32 memory_mb = 1 [(gogoproto.jsontag) = "memory_mb"]; + int32 disk_mb = 2 [(gogoproto.jsontag) = "disk_mb"]; + string root_fs = 3 [(gogoproto.jsontag) = "rootfs"]; + int32 max_pids = 4 [(gogoproto.jsontag) = "max_pids"]; +} + +message DesiredLRP { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + string domain = 2 [(gogoproto.jsontag) = "domain"]; + string root_fs = 3 [(gogoproto.jsontag) = "rootfs"]; + int32 instances = 4 [(gogoproto.jsontag) = "instances"]; + repeated EnvironmentVariable environment_variables = 5 [(gogoproto.jsontag) = "env"]; + Action setup = 6; + Action action = 7; + + int64 start_timeout_ms = 27 [(gogoproto.jsontag) = "start_timeout_ms"]; + uint32 deprecated_start_timeout_s = 8 [(gogoproto.jsontag) = "deprecated_timeout_ns,omitempty", deprecated=true]; + + Action monitor = 9; + int32 disk_mb = 10 [(gogoproto.jsontag) = "disk_mb"]; + int32 memory_mb = 11 [(gogoproto.jsontag) = "memory_mb"]; + uint32 cpu_weight = 12 [(gogoproto.jsontag) = "cpu_weight"]; + bool privileged = 13 [(gogoproto.jsontag) = "privileged"]; + repeated uint32 ports = 14 [packed = false]; + ProtoRoutes 
routes = 15 [(gogoproto.nullable) = true, (gogoproto.customtype) = "Routes"]; + string log_source = 16 [(gogoproto.jsontag) = "log_source"]; + string log_guid = 17 [(gogoproto.jsontag) = "log_guid"]; + string metrics_guid = 18 [deprecated=true, (gogoproto.jsontag) = "metrics_guid"]; + string annotation = 19 [(gogoproto.jsontag) = "annotation"]; + repeated SecurityGroupRule egress_rules = 20; + ModificationTag modification_tag = 21; + repeated CachedDependency cached_dependencies = 22; + string legacy_download_user = 23 [deprecated=true]; + string trusted_system_certificates_path = 24; + repeated VolumeMount volume_mounts = 25; + Network network = 26; + repeated string PlacementTags = 28 [(gogoproto.jsontag) ="placement_tags,omitempty"]; + int32 max_pids = 29 [(gogoproto.jsontag) = "max_pids"]; + + CertificateProperties certificate_properties = 30 [(gogoproto.nullable) = true]; + + string image_username = 31; + string image_password = 32; + + CheckDefinition check_definition = 33; + + repeated ImageLayer image_layers = 34; + + map metric_tags = 35; + + repeated Sidecar sidecars = 36; + LogRateLimit log_rate_limit = 37; + repeated File volume_mounted_files = 38 [(gogoproto.jsontag) = "volume_mounted_files"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go new file mode 100644 index 00000000..92030fec --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go @@ -0,0 +1,69 @@ +package models + +func (request *DesiredLRPsRequest) Validate() error { + return nil +} + +func (request *DesiredLRPByProcessGuidRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *DesireLRPRequest) Validate() error { + var validationError 
ValidationError + + if request.DesiredLrp == nil { + validationError = validationError.Append(ErrInvalidField{"desired_lrp"}) + } else if err := request.DesiredLrp.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *UpdateDesiredLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.Update != nil { + if err := request.Update.Validate(); err != nil { + validationError = validationError.Append(err) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RemoveDesiredLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go new file mode 100644 index 00000000..703679ff --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go @@ -0,0 +1,2806 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: desired_lrp_requests.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DesiredLRPLifecycleResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *DesiredLRPLifecycleResponse) Reset() { *m = DesiredLRPLifecycleResponse{} } +func (*DesiredLRPLifecycleResponse) ProtoMessage() {} +func (*DesiredLRPLifecycleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{0} +} +func (m *DesiredLRPLifecycleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPLifecycleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPLifecycleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPLifecycleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPLifecycleResponse.Merge(m, src) +} +func (m *DesiredLRPLifecycleResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPLifecycleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPLifecycleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPLifecycleResponse proto.InternalMessageInfo + +func (m *DesiredLRPLifecycleResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type DesiredLRPsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrps []*DesiredLRP `protobuf:"bytes,2,rep,name=desired_lrps,json=desiredLrps,proto3" json:"desired_lrps,omitempty"` +} + +func (m *DesiredLRPsResponse) Reset() { *m = DesiredLRPsResponse{} } +func (*DesiredLRPsResponse) ProtoMessage() {} +func (*DesiredLRPsResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_7235cc1a84e38c85, []int{1} +} +func (m *DesiredLRPsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPsResponse.Merge(m, src) +} +func (m *DesiredLRPsResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPsResponse proto.InternalMessageInfo + +func (m *DesiredLRPsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPsResponse) GetDesiredLrps() []*DesiredLRP { + if m != nil { + return m.DesiredLrps + } + return nil +} + +type DesiredLRPsRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + ProcessGuids []string `protobuf:"bytes,2,rep,name=process_guids,json=processGuids,proto3" json:"process_guids,omitempty"` +} + +func (m *DesiredLRPsRequest) Reset() { *m = DesiredLRPsRequest{} } +func (*DesiredLRPsRequest) ProtoMessage() {} +func (*DesiredLRPsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{2} +} +func (m *DesiredLRPsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPsRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DesiredLRPsRequest.Merge(m, src) +} +func (m *DesiredLRPsRequest) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPsRequest proto.InternalMessageInfo + +func (m *DesiredLRPsRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *DesiredLRPsRequest) GetProcessGuids() []string { + if m != nil { + return m.ProcessGuids + } + return nil +} + +type DesiredLRPResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrp *DesiredLRP `protobuf:"bytes,2,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` +} + +func (m *DesiredLRPResponse) Reset() { *m = DesiredLRPResponse{} } +func (*DesiredLRPResponse) ProtoMessage() {} +func (*DesiredLRPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{3} +} +func (m *DesiredLRPResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPResponse.Merge(m, src) +} +func (m *DesiredLRPResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPResponse proto.InternalMessageInfo + +func (m *DesiredLRPResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPResponse) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +type 
DesiredLRPSchedulingInfosResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrpSchedulingInfos []*DesiredLRPSchedulingInfo `protobuf:"bytes,2,rep,name=desired_lrp_scheduling_infos,json=desiredLrpSchedulingInfos,proto3" json:"desired_lrp_scheduling_infos,omitempty"` +} + +func (m *DesiredLRPSchedulingInfosResponse) Reset() { *m = DesiredLRPSchedulingInfosResponse{} } +func (*DesiredLRPSchedulingInfosResponse) ProtoMessage() {} +func (*DesiredLRPSchedulingInfosResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{4} +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPSchedulingInfosResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPSchedulingInfosResponse.Merge(m, src) +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPSchedulingInfosResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPSchedulingInfosResponse proto.InternalMessageInfo + +func (m *DesiredLRPSchedulingInfosResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPSchedulingInfosResponse) GetDesiredLrpSchedulingInfos() []*DesiredLRPSchedulingInfo { + if m != nil { + return m.DesiredLrpSchedulingInfos + } + return nil +} + +type DesiredLRPSchedulingInfoByProcessGuidResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrpSchedulingInfo 
*DesiredLRPSchedulingInfo `protobuf:"bytes,2,opt,name=desired_lrp_scheduling_info,json=desiredLrpSchedulingInfo,proto3" json:"desired_lrp_scheduling_info,omitempty"` +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Reset() { + *m = DesiredLRPSchedulingInfoByProcessGuidResponse{} +} +func (*DesiredLRPSchedulingInfoByProcessGuidResponse) ProtoMessage() {} +func (*DesiredLRPSchedulingInfoByProcessGuidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{5} +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse.Merge(m, src) +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse proto.InternalMessageInfo + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) GetDesiredLrpSchedulingInfo() *DesiredLRPSchedulingInfo { + if m != nil { + return m.DesiredLrpSchedulingInfo + } + return nil +} + +type DesiredLRPByProcessGuidRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` +} 
+ +func (m *DesiredLRPByProcessGuidRequest) Reset() { *m = DesiredLRPByProcessGuidRequest{} } +func (*DesiredLRPByProcessGuidRequest) ProtoMessage() {} +func (*DesiredLRPByProcessGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{6} +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPByProcessGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPByProcessGuidRequest.Merge(m, src) +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPByProcessGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPByProcessGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPByProcessGuidRequest proto.InternalMessageInfo + +func (m *DesiredLRPByProcessGuidRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +type DesireLRPRequest struct { + DesiredLrp *DesiredLRP `protobuf:"bytes,1,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` +} + +func (m *DesireLRPRequest) Reset() { *m = DesireLRPRequest{} } +func (*DesireLRPRequest) ProtoMessage() {} +func (*DesireLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{7} +} +func (m *DesireLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesireLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesireLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err 
!= nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesireLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesireLRPRequest.Merge(m, src) +} +func (m *DesireLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *DesireLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesireLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesireLRPRequest proto.InternalMessageInfo + +func (m *DesireLRPRequest) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +type UpdateDesiredLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Update *DesiredLRPUpdate `protobuf:"bytes,2,opt,name=update,proto3" json:"update,omitempty"` +} + +func (m *UpdateDesiredLRPRequest) Reset() { *m = UpdateDesiredLRPRequest{} } +func (*UpdateDesiredLRPRequest) ProtoMessage() {} +func (*UpdateDesiredLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{8} +} +func (m *UpdateDesiredLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateDesiredLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateDesiredLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateDesiredLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateDesiredLRPRequest.Merge(m, src) +} +func (m *UpdateDesiredLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateDesiredLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateDesiredLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateDesiredLRPRequest proto.InternalMessageInfo + +func (m *UpdateDesiredLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *UpdateDesiredLRPRequest) 
GetUpdate() *DesiredLRPUpdate { + if m != nil { + return m.Update + } + return nil +} + +type RemoveDesiredLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` +} + +func (m *RemoveDesiredLRPRequest) Reset() { *m = RemoveDesiredLRPRequest{} } +func (*RemoveDesiredLRPRequest) ProtoMessage() {} +func (*RemoveDesiredLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{9} +} +func (m *RemoveDesiredLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveDesiredLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveDesiredLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveDesiredLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveDesiredLRPRequest.Merge(m, src) +} +func (m *RemoveDesiredLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveDesiredLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveDesiredLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveDesiredLRPRequest proto.InternalMessageInfo + +func (m *RemoveDesiredLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func init() { + proto.RegisterType((*DesiredLRPLifecycleResponse)(nil), "models.DesiredLRPLifecycleResponse") + proto.RegisterType((*DesiredLRPsResponse)(nil), "models.DesiredLRPsResponse") + proto.RegisterType((*DesiredLRPsRequest)(nil), "models.DesiredLRPsRequest") + proto.RegisterType((*DesiredLRPResponse)(nil), "models.DesiredLRPResponse") + proto.RegisterType((*DesiredLRPSchedulingInfosResponse)(nil), "models.DesiredLRPSchedulingInfosResponse") + proto.RegisterType((*DesiredLRPSchedulingInfoByProcessGuidResponse)(nil), 
"models.DesiredLRPSchedulingInfoByProcessGuidResponse") + proto.RegisterType((*DesiredLRPByProcessGuidRequest)(nil), "models.DesiredLRPByProcessGuidRequest") + proto.RegisterType((*DesireLRPRequest)(nil), "models.DesireLRPRequest") + proto.RegisterType((*UpdateDesiredLRPRequest)(nil), "models.UpdateDesiredLRPRequest") + proto.RegisterType((*RemoveDesiredLRPRequest)(nil), "models.RemoveDesiredLRPRequest") +} + +func init() { proto.RegisterFile("desired_lrp_requests.proto", fileDescriptor_7235cc1a84e38c85) } + +var fileDescriptor_7235cc1a84e38c85 = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4f, 0x6b, 0x13, 0x41, + 0x18, 0xc6, 0x77, 0x2a, 0x06, 0xfa, 0x6e, 0x0a, 0x75, 0x3c, 0x74, 0x4d, 0x65, 0x1a, 0xa7, 0x97, + 0x5e, 0x9a, 0x4a, 0xa3, 0x5f, 0x20, 0x28, 0x45, 0x08, 0x52, 0x46, 0x7a, 0x94, 0x25, 0xd9, 0x9d, + 0x6c, 0x17, 0x92, 0x9d, 0xed, 0x4c, 0x56, 0xe8, 0xad, 0x1f, 0xc1, 0x8f, 0x21, 0x78, 0xf6, 0x3b, + 0x78, 0xcc, 0xb1, 0xa7, 0x62, 0x36, 0x17, 0xe9, 0xa9, 0x1f, 0x41, 0x32, 0x33, 0x75, 0x27, 0xad, + 0x51, 0x23, 0x9e, 0x92, 0x79, 0xff, 0x3c, 0xef, 0xef, 0x7d, 0x79, 0x58, 0x68, 0xc4, 0x5c, 0xa5, + 0x92, 0xc7, 0xe1, 0x50, 0xe6, 0xa1, 0xe4, 0x67, 0x05, 0x57, 0x63, 0xd5, 0xca, 0xa5, 0x18, 0x0b, + 0x5c, 0x1b, 0x89, 0x98, 0x0f, 0x55, 0x63, 0x3f, 0x49, 0xc7, 0xa7, 0x45, 0xbf, 0x15, 0x89, 0xd1, + 0x41, 0x22, 0x12, 0x71, 0xa0, 0xd3, 0xfd, 0x62, 0xa0, 0x5f, 0xfa, 0xa1, 0xff, 0x99, 0xb6, 0xc6, + 0x23, 0x47, 0xd2, 0x86, 0x7c, 0x2e, 0xa5, 0x90, 0xe6, 0x41, 0x3b, 0xb0, 0xfd, 0xca, 0x54, 0x74, + 0xd9, 0x71, 0x37, 0x1d, 0xf0, 0xe8, 0x3c, 0x1a, 0x72, 0xc6, 0x55, 0x2e, 0x32, 0xc5, 0xf1, 0x2e, + 0x3c, 0xd4, 0xd5, 0x01, 0x6a, 0xa2, 0x3d, 0xff, 0x70, 0xa3, 0x65, 0x28, 0x5a, 0xaf, 0xe7, 0x41, + 0x66, 0x72, 0xf4, 0x0c, 0x1e, 0x57, 0x1a, 0x6a, 0xa5, 0x5e, 0xfc, 0x12, 0xea, 0x0e, 0xa1, 0x0a, + 0xd6, 0x9a, 0x0f, 0xf6, 0xfc, 0x43, 0x7c, 0x5b, 0x5b, 0xe9, 0x32, 0xdf, 0xd6, 0x75, 0x65, 0xae, + 0xe8, 0x7b, 
0xc0, 0x0b, 0x23, 0xf5, 0xa9, 0x30, 0x85, 0x5a, 0x2c, 0x46, 0xbd, 0x34, 0xd3, 0x23, + 0xd7, 0x3b, 0x70, 0x7d, 0xb5, 0x63, 0x23, 0xcc, 0xfe, 0xe2, 0x5d, 0xd8, 0xc8, 0xa5, 0x88, 0xb8, + 0x52, 0x61, 0x52, 0xa4, 0xb1, 0x99, 0xb8, 0xce, 0xea, 0x36, 0x78, 0x34, 0x8f, 0xd1, 0xcc, 0x95, + 0x5f, 0x6d, 0xa1, 0x36, 0xf8, 0xce, 0x42, 0xc1, 0x9a, 0x2e, 0xfd, 0xd5, 0x3e, 0x50, 0xed, 0x43, + 0x3f, 0x23, 0x78, 0x56, 0xa5, 0xde, 0x45, 0xa7, 0x3c, 0x2e, 0x86, 0x69, 0x96, 0xbc, 0xc9, 0x06, + 0x62, 0xc5, 0x83, 0xf6, 0xe0, 0xa9, 0xeb, 0x22, 0xf5, 0x53, 0x2b, 0x4c, 0xe7, 0x62, 0xf6, 0xc0, + 0xcd, 0xfb, 0x40, 0x8b, 0x53, 0xd9, 0x93, 0x0a, 0xef, 0x0e, 0x0f, 0xfd, 0x82, 0x60, 0x7f, 0x59, + 0x5f, 0xe7, 0xfc, 0xb8, 0x3a, 0xe4, 0x6a, 0xe4, 0x21, 0x6c, 0xff, 0x86, 0xdc, 0x5e, 0xf2, 0xcf, + 0xe0, 0xc1, 0x32, 0x70, 0x7a, 0x02, 0xa4, 0xea, 0xba, 0x03, 0x6a, 0x0c, 0xd4, 0x86, 0xba, 0x6b, + 0x0e, 0x6b, 0xa3, 0xcd, 0xeb, 0xab, 0x9d, 0x85, 0x38, 0xf3, 0x1d, 0xb7, 0xd0, 0x23, 0xd8, 0x34, + 0xb2, 0xda, 0x2b, 0xb7, 0x42, 0x0b, 0x2e, 0x40, 0x7f, 0xe5, 0x82, 0x0b, 0x04, 0x5b, 0x27, 0x79, + 0xdc, 0x1b, 0x73, 0xd7, 0x7c, 0xff, 0x4e, 0x86, 0x9f, 0x43, 0xad, 0xd0, 0x7a, 0xf6, 0x78, 0xc1, + 0x7d, 0x00, 0x33, 0x8f, 0xd9, 0x3a, 0xfa, 0x16, 0xb6, 0x18, 0x1f, 0x89, 0x0f, 0xff, 0x89, 0xa0, + 0xf3, 0x62, 0x32, 0x25, 0xde, 0xe5, 0x94, 0x78, 0x37, 0x53, 0x82, 0x2e, 0x4a, 0x82, 0x3e, 0x95, + 0x04, 0x7d, 0x2d, 0x09, 0x9a, 0x94, 0x04, 0x7d, 0x2b, 0x09, 0xfa, 0x5e, 0x12, 0xef, 0xa6, 0x24, + 0xe8, 0xe3, 0x8c, 0x78, 0x93, 0x19, 0xf1, 0x2e, 0x67, 0xc4, 0xeb, 0xd7, 0xf4, 0xb7, 0xa9, 0xfd, + 0x23, 0x00, 0x00, 0xff, 0xff, 0xca, 0x08, 0x89, 0xe8, 0x10, 0x05, 0x00, 0x00, +} + +func (this *DesiredLRPLifecycleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPLifecycleResponse) + if !ok { + that2, ok := that.(DesiredLRPLifecycleResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + 
return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *DesiredLRPsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPsResponse) + if !ok { + that2, ok := that.(DesiredLRPsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.DesiredLrps) != len(that1.DesiredLrps) { + return false + } + for i := range this.DesiredLrps { + if !this.DesiredLrps[i].Equal(that1.DesiredLrps[i]) { + return false + } + } + return true +} +func (this *DesiredLRPsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPsRequest) + if !ok { + that2, ok := that.(DesiredLRPsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if len(this.ProcessGuids) != len(that1.ProcessGuids) { + return false + } + for i := range this.ProcessGuids { + if this.ProcessGuids[i] != that1.ProcessGuids[i] { + return false + } + } + return true +} +func (this *DesiredLRPResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPResponse) + if !ok { + that2, ok := that.(DesiredLRPResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + return true +} +func (this *DesiredLRPSchedulingInfosResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPSchedulingInfosResponse) + if 
!ok { + that2, ok := that.(DesiredLRPSchedulingInfosResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.DesiredLrpSchedulingInfos) != len(that1.DesiredLrpSchedulingInfos) { + return false + } + for i := range this.DesiredLrpSchedulingInfos { + if !this.DesiredLrpSchedulingInfos[i].Equal(that1.DesiredLrpSchedulingInfos[i]) { + return false + } + } + return true +} +func (this *DesiredLRPSchedulingInfoByProcessGuidResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPSchedulingInfoByProcessGuidResponse) + if !ok { + that2, ok := that.(DesiredLRPSchedulingInfoByProcessGuidResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.DesiredLrpSchedulingInfo.Equal(that1.DesiredLrpSchedulingInfo) { + return false + } + return true +} +func (this *DesiredLRPByProcessGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPByProcessGuidRequest) + if !ok { + that2, ok := that.(DesiredLRPByProcessGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + return true +} +func (this *DesireLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesireLRPRequest) + if !ok { + that2, ok := that.(DesireLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
!this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + return true +} +func (this *UpdateDesiredLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UpdateDesiredLRPRequest) + if !ok { + that2, ok := that.(UpdateDesiredLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if !this.Update.Equal(that1.Update) { + return false + } + return true +} +func (this *RemoveDesiredLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RemoveDesiredLRPRequest) + if !ok { + that2, ok := that.(RemoveDesiredLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + return true +} +func (this *DesiredLRPLifecycleResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.DesiredLRPLifecycleResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrps != nil { + s = append(s, "DesiredLrps: "+fmt.Sprintf("%#v", this.DesiredLrps)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPsRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", 
this.Domain)+",\n") + s = append(s, "ProcessGuids: "+fmt.Sprintf("%#v", this.ProcessGuids)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPSchedulingInfosResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPSchedulingInfosResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrpSchedulingInfos != nil { + s = append(s, "DesiredLrpSchedulingInfos: "+fmt.Sprintf("%#v", this.DesiredLrpSchedulingInfos)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPSchedulingInfoByProcessGuidResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPSchedulingInfoByProcessGuidResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrpSchedulingInfo != nil { + s = append(s, "DesiredLrpSchedulingInfo: "+fmt.Sprintf("%#v", this.DesiredLrpSchedulingInfo)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPByProcessGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.DesiredLRPByProcessGuidRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesireLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := 
make([]string, 0, 5) + s = append(s, "&models.DesireLRPRequest{") + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UpdateDesiredLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.UpdateDesiredLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + if this.Update != nil { + s = append(s, "Update: "+fmt.Sprintf("%#v", this.Update)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveDesiredLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.RemoveDesiredLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDesiredLrpRequests(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DesiredLRPLifecycleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPLifecycleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPLifecycleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPsResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DesiredLrps) > 0 { + for iNdEx := len(m.DesiredLrps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DesiredLrps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuids) > 0 { + for iNdEx := len(m.ProcessGuids) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProcessGuids[iNdEx]) + copy(dAtA[i:], m.ProcessGuids[iNdEx]) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuids[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + 
+func (m *DesiredLRPResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPSchedulingInfosResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPSchedulingInfosResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPSchedulingInfosResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DesiredLrpSchedulingInfos) > 0 { + for iNdEx := len(m.DesiredLrpSchedulingInfos) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DesiredLrpSchedulingInfos[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DesiredLrpSchedulingInfo != nil { + { + size, err := m.DesiredLrpSchedulingInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPByProcessGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPByProcessGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPByProcessGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesireLRPRequest) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesireLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesireLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateDesiredLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateDesiredLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateDesiredLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Update != nil { + { + size, err := m.Update.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveDesiredLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveDesiredLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveDesiredLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDesiredLrpRequests(dAtA []byte, offset int, v uint64) int { + offset -= sovDesiredLrpRequests(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DesiredLRPLifecycleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesiredLRPsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if len(m.DesiredLrps) > 0 { + for _, e := range m.DesiredLrps { + l = e.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if len(m.ProcessGuids) > 0 { + for _, s := range m.ProcessGuids { + l = len(s) + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesiredLRPSchedulingInfosResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + 
l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if len(m.DesiredLrpSchedulingInfos) > 0 { + for _, e := range m.DesiredLrpSchedulingInfos { + l = e.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if m.DesiredLrpSchedulingInfo != nil { + l = m.DesiredLrpSchedulingInfo.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesiredLRPByProcessGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesireLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *UpdateDesiredLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if m.Update != nil { + l = m.Update.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *RemoveDesiredLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func sovDesiredLrpRequests(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDesiredLrpRequests(x uint64) (n int) { + return sovDesiredLrpRequests(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DesiredLRPLifecycleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPLifecycleResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", 
this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForDesiredLrps := "[]*DesiredLRP{" + for _, f := range this.DesiredLrps { + repeatedStringForDesiredLrps += strings.Replace(fmt.Sprintf("%v", f), "DesiredLRP", "DesiredLRP", 1) + "," + } + repeatedStringForDesiredLrps += "}" + s := strings.Join([]string{`&DesiredLRPsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrps:` + repeatedStringForDesiredLrps + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPsRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `ProcessGuids:` + fmt.Sprintf("%v", this.ProcessGuids) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPSchedulingInfosResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForDesiredLrpSchedulingInfos := "[]*DesiredLRPSchedulingInfo{" + for _, f := range this.DesiredLrpSchedulingInfos { + repeatedStringForDesiredLrpSchedulingInfos += strings.Replace(fmt.Sprintf("%v", f), "DesiredLRPSchedulingInfo", "DesiredLRPSchedulingInfo", 1) + "," + } + repeatedStringForDesiredLrpSchedulingInfos += "}" + s := strings.Join([]string{`&DesiredLRPSchedulingInfosResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrpSchedulingInfos:` + repeatedStringForDesiredLrpSchedulingInfos + `,`, + `}`, + }, "") + return s +} 
+func (this *DesiredLRPSchedulingInfoByProcessGuidResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPSchedulingInfoByProcessGuidResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrpSchedulingInfo:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrpSchedulingInfo), "DesiredLRPSchedulingInfo", "DesiredLRPSchedulingInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPByProcessGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPByProcessGuidRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `}`, + }, "") + return s +} +func (this *DesireLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesireLRPRequest{`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateDesiredLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateDesiredLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Update:` + strings.Replace(fmt.Sprintf("%v", this.Update), "DesiredLRPUpdate", "DesiredLRPUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveDesiredLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveDesiredLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `}`, + }, "") + return s +} +func valueToStringDesiredLrpRequests(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DesiredLRPLifecycleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPLifecycleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPLifecycleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredLrps = append(m.DesiredLrps, &DesiredLRP{}) + if err := m.DesiredLrps[len(m.DesiredLrps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuids", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuids = append(m.ProcessGuids, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPSchedulingInfosResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfosResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrpSchedulingInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredLrpSchedulingInfos = append(m.DesiredLrpSchedulingInfos, &DesiredLRPSchedulingInfo{}) + if err := m.DesiredLrpSchedulingInfos[len(m.DesiredLrpSchedulingInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + 
} + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfoByProcessGuidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfoByProcessGuidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrpSchedulingInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrpSchedulingInfo == nil { + m.DesiredLrpSchedulingInfo = &DesiredLRPSchedulingInfo{} + } + if err := m.DesiredLrpSchedulingInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPByProcessGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPByProcessGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPByProcessGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesireLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesireLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesireLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateDesiredLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateDesiredLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateDesiredLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Update == nil { + m.Update = &DesiredLRPUpdate{} + } + if err := m.Update.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveDesiredLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveDesiredLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveDesiredLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDesiredLrpRequests(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length 
< 0 { + return 0, ErrInvalidLengthDesiredLrpRequests + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDesiredLrpRequests + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDesiredLrpRequests + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDesiredLrpRequests = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDesiredLrpRequests = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDesiredLrpRequests = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto new file mode 100644 index 00000000..be746255 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "desired_lrp.proto"; +import "error.proto"; + +message DesiredLRPLifecycleResponse { + Error error = 1; +} + +message DesiredLRPsResponse { + Error error = 1; + repeated DesiredLRP desired_lrps = 2; +} + +message DesiredLRPsRequest { + string domain = 1 [(gogoproto.jsontag) = "domain"]; + repeated string process_guids = 2; +} + +message DesiredLRPResponse { + Error error = 1; + DesiredLRP desired_lrp = 2; +} + +message DesiredLRPSchedulingInfosResponse { + Error error = 1; + repeated DesiredLRPSchedulingInfo desired_lrp_scheduling_infos = 2; +} + +message DesiredLRPSchedulingInfoByProcessGuidResponse { + Error error = 1; + DesiredLRPSchedulingInfo desired_lrp_scheduling_info = 2; +} + +message DesiredLRPByProcessGuidRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; +} + +message DesireLRPRequest { + DesiredLRP desired_lrp = 1; +} + 
+message UpdateDesiredLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + DesiredLRPUpdate update = 2; +} + +message RemoveDesiredLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/domain.pb.go b/vendor/code.cloudfoundry.org/bbs/models/domain.pb.go new file mode 100644 index 00000000..08777240 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/domain.pb.go @@ -0,0 +1,853 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: domain.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DomainsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` +} + +func (m *DomainsResponse) Reset() { *m = DomainsResponse{} } +func (*DomainsResponse) ProtoMessage() {} +func (*DomainsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_73e6234e76dbdb84, []int{0} +} +func (m *DomainsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DomainsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DomainsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DomainsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DomainsResponse.Merge(m, src) +} +func (m *DomainsResponse) XXX_Size() int { + return m.Size() +} +func (m *DomainsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DomainsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DomainsResponse proto.InternalMessageInfo + +func (m *DomainsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DomainsResponse) GetDomains() []string { + if m != nil { + return m.Domains + } + return nil +} + +type UpsertDomainResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *UpsertDomainResponse) Reset() { *m = UpsertDomainResponse{} } +func (*UpsertDomainResponse) ProtoMessage() {} +func (*UpsertDomainResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_73e6234e76dbdb84, []int{1} +} +func (m *UpsertDomainResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpsertDomainResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
if deterministic { + return xxx_messageInfo_UpsertDomainResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpsertDomainResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpsertDomainResponse.Merge(m, src) +} +func (m *UpsertDomainResponse) XXX_Size() int { + return m.Size() +} +func (m *UpsertDomainResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpsertDomainResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpsertDomainResponse proto.InternalMessageInfo + +func (m *UpsertDomainResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type UpsertDomainRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + Ttl uint32 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl"` +} + +func (m *UpsertDomainRequest) Reset() { *m = UpsertDomainRequest{} } +func (*UpsertDomainRequest) ProtoMessage() {} +func (*UpsertDomainRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_73e6234e76dbdb84, []int{2} +} +func (m *UpsertDomainRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpsertDomainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpsertDomainRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpsertDomainRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpsertDomainRequest.Merge(m, src) +} +func (m *UpsertDomainRequest) XXX_Size() int { + return m.Size() +} +func (m *UpsertDomainRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpsertDomainRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpsertDomainRequest proto.InternalMessageInfo + +func (m *UpsertDomainRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return 
"" +} + +func (m *UpsertDomainRequest) GetTtl() uint32 { + if m != nil { + return m.Ttl + } + return 0 +} + +func init() { + proto.RegisterType((*DomainsResponse)(nil), "models.DomainsResponse") + proto.RegisterType((*UpsertDomainResponse)(nil), "models.UpsertDomainResponse") + proto.RegisterType((*UpsertDomainRequest)(nil), "models.UpsertDomainRequest") +} + +func init() { proto.RegisterFile("domain.proto", fileDescriptor_73e6234e76dbdb84) } + +var fileDescriptor_73e6234e76dbdb84 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xc9, 0xcf, 0x4d, + 0xcc, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, + 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, + 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0x26, 0xc5, + 0x9d, 0x5a, 0x54, 0x94, 0x5f, 0x04, 0xe1, 0x28, 0x05, 0x70, 0xf1, 0xbb, 0x80, 0xcd, 0x2c, 0x0e, + 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0x52, 0xe6, 0x62, 0x05, 0xab, 0x90, 0x60, 0x54, + 0x60, 0xd4, 0xe0, 0x36, 0xe2, 0xd5, 0x83, 0x58, 0xa3, 0xe7, 0x0a, 0x12, 0x0c, 0x82, 0xc8, 0x09, + 0x49, 0x70, 0xb1, 0x43, 0xdc, 0x52, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, 0x19, 0x04, 0xe3, 0x2a, + 0x59, 0x73, 0x89, 0x84, 0x16, 0x14, 0xa7, 0x16, 0x95, 0x40, 0xcc, 0x25, 0xc9, 0x58, 0xa5, 0x10, + 0x2e, 0x61, 0x54, 0xcd, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x4a, 0x5c, 0x6c, 0x10, 0xe3, 0xc1, + 0x9a, 0x39, 0x9d, 0xb8, 0x5e, 0xdd, 0x93, 0x87, 0x8a, 0x04, 0x41, 0x69, 0x21, 0x49, 0x2e, 0xe6, + 0x92, 0x92, 0x1c, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x5e, 0x27, 0xf6, 0x57, 0xf7, 0xe4, 0x41, 0xdc, + 0x20, 0x10, 0xe1, 0x64, 0x72, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, + 0x31, 0x36, 0x3c, 0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xe1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, + 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 
0x39, 0xc6, 0x09, + 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89, 0x0d, 0x1c, 0x42, + 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2a, 0xd1, 0x20, 0xc4, 0x75, 0x01, 0x00, 0x00, +} + +func (this *DomainsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DomainsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "Domains: "+fmt.Sprintf("%#v", this.Domains)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UpsertDomainResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.UpsertDomainResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UpsertDomainRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.UpsertDomainRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "Ttl: "+fmt.Sprintf("%#v", this.Ttl)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDomain(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DomainsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DomainsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DomainsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Domains) 
> 0 { + for iNdEx := len(m.Domains) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Domains[iNdEx]) + copy(dAtA[i:], m.Domains[iNdEx]) + i = encodeVarintDomain(dAtA, i, uint64(len(m.Domains[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDomain(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpsertDomainResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpsertDomainResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpsertDomainResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDomain(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpsertDomainRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpsertDomainRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpsertDomainRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ttl != 0 { + i = encodeVarintDomain(dAtA, i, uint64(m.Ttl)) + i-- + dAtA[i] = 0x10 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDomain(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
encodeVarintDomain(dAtA []byte, offset int, v uint64) int { + offset -= sovDomain(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DomainsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDomain(uint64(l)) + } + if len(m.Domains) > 0 { + for _, s := range m.Domains { + l = len(s) + n += 1 + l + sovDomain(uint64(l)) + } + } + return n +} + +func (m *UpsertDomainResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDomain(uint64(l)) + } + return n +} + +func (m *UpsertDomainRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDomain(uint64(l)) + } + if m.Ttl != 0 { + n += 1 + sovDomain(uint64(m.Ttl)) + } + return n +} + +func sovDomain(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDomain(x uint64) (n int) { + return sovDomain(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DomainsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DomainsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Domains:` + fmt.Sprintf("%v", this.Domains) + `,`, + `}`, + }, "") + return s +} +func (this *UpsertDomainResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpsertDomainResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpsertDomainRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpsertDomainRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `Ttl:` + fmt.Sprintf("%v", this.Ttl) + `,`, + `}`, + }, "") + 
return s +} +func valueToStringDomain(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DomainsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DomainsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DomainsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domains = append(m.Domains, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDomain(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDomain + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpsertDomainResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpsertDomainResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpsertDomainResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := 
m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDomain(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDomain + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpsertDomainRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpsertDomainRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpsertDomainRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType) + } + m.Ttl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ttl |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDomain(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDomain + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDomain(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDomain + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDomain + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDomain + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDomain + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDomain + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDomain + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDomain = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDomain = 
fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDomain = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/domain.proto b/vendor/code.cloudfoundry.org/bbs/models/domain.proto new file mode 100644 index 00000000..7028f0f7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/domain.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "error.proto"; + +option (gogoproto.equal_all) = false; + +message DomainsResponse { + Error error = 1; + repeated string domains = 2; +} + +message UpsertDomainResponse { + Error error = 1; +} + +message UpsertDomainRequest { + string domain = 1 [(gogoproto.jsontag) = "domain"]; + uint32 ttl = 2 [(gogoproto.jsontag) = "ttl"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/domains.go b/vendor/code.cloudfoundry.org/bbs/models/domains.go new file mode 100644 index 00000000..a94ee90c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/domains.go @@ -0,0 +1,36 @@ +package models + +type DomainSet map[string]struct{} + +func (set DomainSet) Add(domain string) { + set[domain] = struct{}{} +} + +func (set DomainSet) Each(predicate func(domain string)) { + for domain := range set { + predicate(domain) + } +} + +func (set DomainSet) Contains(domain string) bool { + _, found := set[domain] + return found +} + +func NewDomainSet(domains []string) DomainSet { + domainSet := DomainSet{} + for _, domain := range domains { + domainSet.Add(domain) + } + return domainSet +} + +func (request *UpsertDomainRequest) Validate() error { + var validationError ValidationError + + if request.Domain == "" { + return validationError.Append(ErrInvalidField{"domain"}) + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/environment_variables.go b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.go new file mode 100644 index 00000000..49185848 --- /dev/null +++ 
b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.go @@ -0,0 +1,10 @@ +package models + +import "errors" + +func (envVar EnvironmentVariable) Validate() error { + if envVar.Name == "" { + return errors.New("invalid field: name cannot be blank") + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go new file mode 100644 index 00000000..c3db470a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go @@ -0,0 +1,436 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: environment_variables.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EnvironmentVariable struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value"` +} + +func (m *EnvironmentVariable) Reset() { *m = EnvironmentVariable{} } +func (*EnvironmentVariable) ProtoMessage() {} +func (*EnvironmentVariable) Descriptor() ([]byte, []int) { + return fileDescriptor_8938dda491bd78a1, []int{0} +} +func (m *EnvironmentVariable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvironmentVariable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvironmentVariable.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvironmentVariable) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvironmentVariable.Merge(m, src) +} +func (m *EnvironmentVariable) XXX_Size() int { + return m.Size() +} +func (m *EnvironmentVariable) XXX_DiscardUnknown() { + xxx_messageInfo_EnvironmentVariable.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvironmentVariable proto.InternalMessageInfo + +func (m *EnvironmentVariable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnvironmentVariable) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func init() { + proto.RegisterType((*EnvironmentVariable)(nil), "models.EnvironmentVariable") +} + +func init() { proto.RegisterFile("environment_variables.proto", fileDescriptor_8938dda491bd78a1) } + +var fileDescriptor_8938dda491bd78a1 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcd, 0x2b, 0xcb, + 0x2c, 0xca, 0xcf, 0xcb, 0x4d, 0xcd, 0x2b, 0x89, 0x2f, 0x4b, 0x2c, 0xca, 0x4c, 0x4c, 0xca, 0x49, + 0x2d, 0xd6, 0x2b, 0x28, 
0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, + 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, + 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x14, 0xc2, + 0x25, 0xec, 0x8a, 0x30, 0x35, 0x0c, 0x6a, 0xa8, 0x90, 0x0c, 0x17, 0x4b, 0x5e, 0x62, 0x6e, 0xaa, + 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xc7, 0xab, 0x7b, 0xf2, 0x60, 0x7e, 0x10, 0x98, 0x14, + 0x92, 0xe7, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x95, 0x60, 0x02, 0x4b, 0x73, 0xbe, 0xba, 0x27, + 0x0f, 0x11, 0x08, 0x82, 0x50, 0x4e, 0x26, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, + 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, + 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, + 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, + 0xc0, 0x4e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x8a, 0x91, 0xe1, 0xe8, 0x00, 0x00, + 0x00, +} + +func (this *EnvironmentVariable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnvironmentVariable) + if !ok { + that2, ok := that.(EnvironmentVariable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *EnvironmentVariable) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EnvironmentVariable{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEnvironmentVariables(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if 
rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *EnvironmentVariable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvironmentVariable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvironmentVariable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintEnvironmentVariables(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvironmentVariables(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEnvironmentVariables(dAtA []byte, offset int, v uint64) int { + offset -= sovEnvironmentVariables(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EnvironmentVariable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvironmentVariables(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovEnvironmentVariables(uint64(l)) + } + return n +} + +func sovEnvironmentVariables(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEnvironmentVariables(x uint64) (n int) { + return sovEnvironmentVariables(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *EnvironmentVariable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvironmentVariable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", 
this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringEnvironmentVariables(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *EnvironmentVariable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvironmentVariable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvironmentVariable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvironmentVariables + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvironmentVariables + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvironmentVariables + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvironmentVariables + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvironmentVariables(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEnvironmentVariables + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEnvironmentVariables(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEnvironmentVariables + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEnvironmentVariables + } + depth-- + case 5: + iNdEx += 
4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEnvironmentVariables + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEnvironmentVariables = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEnvironmentVariables = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEnvironmentVariables = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto new file mode 100644 index 00000000..390aa783 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message EnvironmentVariable { + string name = 1 [(gogoproto.jsontag) = "name"]; + string value = 2 [(gogoproto.jsontag) = "value"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/error.pb.go b/vendor/code.cloudfoundry.org/bbs/models/error.pb.go new file mode 100644 index 00000000..039ca6d6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/error.pb.go @@ -0,0 +1,515 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: error.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Error_Type int32 + +const ( + Error_UnknownError Error_Type = 0 + Error_InvalidRecord Error_Type = 3 + Error_InvalidRequest Error_Type = 4 + Error_InvalidResponse Error_Type = 5 + Error_InvalidProtobufMessage Error_Type = 6 + Error_InvalidJSON Error_Type = 7 + Error_FailedToOpenEnvelope Error_Type = 8 + Error_InvalidStateTransition Error_Type = 9 + Error_ResourceConflict Error_Type = 11 + Error_ResourceExists Error_Type = 12 + Error_ResourceNotFound Error_Type = 13 + Error_RouterError Error_Type = 14 + Error_ActualLRPCannotBeClaimed Error_Type = 15 + Error_ActualLRPCannotBeStarted Error_Type = 16 + Error_ActualLRPCannotBeCrashed Error_Type = 17 + Error_ActualLRPCannotBeFailed Error_Type = 18 + Error_ActualLRPCannotBeRemoved Error_Type = 19 + Error_ActualLRPCannotBeUnclaimed Error_Type = 21 + Error_RunningOnDifferentCell Error_Type = 24 + Error_GUIDGeneration Error_Type = 26 + Error_Deserialize Error_Type = 27 + Error_Deadlock Error_Type = 28 + Error_Unrecoverable Error_Type = 29 + Error_LockCollision Error_Type = 30 + Error_Timeout Error_Type = 31 +) + +var Error_Type_name = map[int32]string{ + 0: "UnknownError", + 3: "InvalidRecord", + 4: "InvalidRequest", + 5: "InvalidResponse", + 6: "InvalidProtobufMessage", + 7: "InvalidJSON", + 8: "FailedToOpenEnvelope", + 9: "InvalidStateTransition", + 11: "ResourceConflict", + 12: "ResourceExists", + 13: "ResourceNotFound", + 14: "RouterError", + 15: "ActualLRPCannotBeClaimed", + 16: "ActualLRPCannotBeStarted", + 17: "ActualLRPCannotBeCrashed", + 18: "ActualLRPCannotBeFailed", + 19: "ActualLRPCannotBeRemoved", + 21: "ActualLRPCannotBeUnclaimed", + 24: "RunningOnDifferentCell", + 26: "GUIDGeneration", + 27: "Deserialize", + 28: "Deadlock", + 29: "Unrecoverable", + 30: "LockCollision", + 31: "Timeout", +} + +var Error_Type_value = 
map[string]int32{ + "UnknownError": 0, + "InvalidRecord": 3, + "InvalidRequest": 4, + "InvalidResponse": 5, + "InvalidProtobufMessage": 6, + "InvalidJSON": 7, + "FailedToOpenEnvelope": 8, + "InvalidStateTransition": 9, + "ResourceConflict": 11, + "ResourceExists": 12, + "ResourceNotFound": 13, + "RouterError": 14, + "ActualLRPCannotBeClaimed": 15, + "ActualLRPCannotBeStarted": 16, + "ActualLRPCannotBeCrashed": 17, + "ActualLRPCannotBeFailed": 18, + "ActualLRPCannotBeRemoved": 19, + "ActualLRPCannotBeUnclaimed": 21, + "RunningOnDifferentCell": 24, + "GUIDGeneration": 26, + "Deserialize": 27, + "Deadlock": 28, + "Unrecoverable": 29, + "LockCollision": 30, + "Timeout": 31, +} + +func (Error_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0579b252106fcf4a, []int{0, 0} +} + +type Error struct { + Type Error_Type `protobuf:"varint,1,opt,name=type,proto3,enum=models.Error_Type" json:"type"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"` +} + +func (m *Error) Reset() { *m = Error{} } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_0579b252106fcf4a, []int{0} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(m, src) +} +func (m *Error) XXX_Size() int { + return m.Size() +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetType() Error_Type { + if m != nil { + return m.Type + } + return Error_UnknownError +} + +func (m *Error) GetMessage() string { + if m != nil { + return 
m.Message + } + return "" +} + +func init() { + proto.RegisterEnum("models.Error_Type", Error_Type_name, Error_Type_value) + proto.RegisterType((*Error)(nil), "models.Error") +} + +func init() { proto.RegisterFile("error.proto", fileDescriptor_0579b252106fcf4a) } + +var fileDescriptor_0579b252106fcf4a = []byte{ + // 585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0xb5, 0xf9, 0x0c, 0x98, 0x09, 0x3f, 0x97, 0x21, 0x1f, 0x84, 0x40, 0x07, 0x64, 0xa9, 0x12, + 0x9b, 0x86, 0xaa, 0xed, 0x0b, 0x34, 0x09, 0x20, 0x2a, 0x0a, 0xc8, 0x24, 0x0f, 0x30, 0xb1, 0x6f, + 0xc2, 0x88, 0xc9, 0x4c, 0x3a, 0x1e, 0xa7, 0xa5, 0xab, 0x3e, 0x42, 0x1f, 0xa3, 0x8f, 0xd2, 0x45, + 0x17, 0x2c, 0x59, 0xa1, 0x62, 0x36, 0x2d, 0x2b, 0x1e, 0xa1, 0xb2, 0x13, 0x10, 0x12, 0x6c, 0xac, + 0x7b, 0xcf, 0xb9, 0xe7, 0xf8, 0x9e, 0x6b, 0x99, 0x94, 0xd0, 0x18, 0x6d, 0x6a, 0x03, 0xa3, 0xad, + 0xa6, 0x53, 0x7d, 0x1d, 0xa3, 0x4c, 0xaa, 0xaf, 0x7a, 0xc2, 0x9e, 0xa6, 0x9d, 0x5a, 0xa4, 0xfb, + 0xdb, 0x3d, 0xdd, 0xd3, 0xdb, 0x05, 0xdd, 0x49, 0xbb, 0x45, 0x57, 0x34, 0x45, 0x35, 0x92, 0x05, + 0xbf, 0x26, 0xc9, 0xe4, 0x4e, 0x6e, 0x43, 0x5f, 0x13, 0xcf, 0x9e, 0x0f, 0xb0, 0xe2, 0x6e, 0xba, + 0x5b, 0xf3, 0x6f, 0x68, 0x6d, 0xe4, 0x57, 0x2b, 0xc8, 0x5a, 0xeb, 0x7c, 0x80, 0x75, 0xff, 0xf6, + 0x6a, 0xa3, 0x98, 0x09, 0x8b, 0x27, 0x7d, 0x49, 0xa6, 0xfb, 0x98, 0x24, 0xbc, 0x87, 0x95, 0x89, + 0x4d, 0x77, 0x6b, 0xa6, 0x5e, 0xba, 0xbd, 0xda, 0xb8, 0x87, 0xc2, 0xfb, 0x22, 0xf8, 0xeb, 0x11, + 0x2f, 0xd7, 0x53, 0x20, 0xb3, 0x6d, 0x75, 0xa6, 0xf4, 0x67, 0x55, 0x98, 0x82, 0x43, 0x17, 0xc9, + 0xdc, 0xbe, 0x1a, 0x72, 0x29, 0xe2, 0x10, 0x23, 0x6d, 0x62, 0xf8, 0x8f, 0x52, 0x32, 0xff, 0x00, + 0x7d, 0x4a, 0x31, 0xb1, 0xe0, 0xd1, 0x25, 0xb2, 0xf0, 0x80, 0x25, 0x03, 0xad, 0x12, 0x84, 0x49, + 0x5a, 0x25, 0xcb, 0x63, 0xf0, 0x78, 0x9c, 0xf0, 0xe3, 0xe8, 0x85, 0x30, 0x45, 0x17, 0x48, 0x69, + 0xcc, 0x7d, 0x38, 0x39, 0x3a, 0x84, 0x69, 0x5a, 0x21, 0xe5, 0x5d, 
0x2e, 0x24, 0xc6, 0x2d, 0x7d, + 0x34, 0x40, 0xb5, 0xa3, 0x86, 0x28, 0xf5, 0x00, 0xc1, 0x7f, 0x64, 0x73, 0x62, 0xb9, 0xc5, 0x96, + 0xe1, 0x2a, 0x11, 0x56, 0x68, 0x05, 0x33, 0xb4, 0x4c, 0x20, 0xc4, 0x44, 0xa7, 0x26, 0xc2, 0x86, + 0x56, 0x5d, 0x29, 0x22, 0x0b, 0xa5, 0x7c, 0xc3, 0x7b, 0x74, 0xe7, 0x8b, 0x48, 0x6c, 0x02, 0xb3, + 0x8f, 0x27, 0x0f, 0xb5, 0xdd, 0xd5, 0xa9, 0x8a, 0x61, 0x2e, 0x5f, 0x23, 0xd4, 0xa9, 0x45, 0x33, + 0xca, 0x3b, 0x4f, 0xd7, 0x49, 0xe5, 0x7d, 0x64, 0x53, 0x2e, 0x0f, 0xc2, 0xe3, 0x06, 0x57, 0x4a, + 0xdb, 0x3a, 0x36, 0x24, 0x17, 0x7d, 0x8c, 0x61, 0xe1, 0x59, 0xf6, 0xc4, 0x72, 0x63, 0x31, 0x06, + 0x78, 0x5e, 0x6b, 0x78, 0x72, 0x8a, 0x31, 0x2c, 0xd2, 0x35, 0xb2, 0xf2, 0x84, 0x1d, 0x25, 0x06, + 0xfa, 0xac, 0x34, 0xc4, 0xbe, 0x1e, 0x62, 0x0c, 0x4b, 0x94, 0x91, 0xea, 0x13, 0xb6, 0xad, 0xa2, + 0xf1, 0x5a, 0xff, 0xe7, 0x17, 0x0a, 0x53, 0xa5, 0x84, 0xea, 0x1d, 0xa9, 0xa6, 0xe8, 0x76, 0xd1, + 0xa0, 0xb2, 0x0d, 0x94, 0x12, 0x2a, 0xf9, 0x2d, 0xf6, 0xda, 0xfb, 0xcd, 0x3d, 0x54, 0x68, 0x78, + 0x71, 0xb5, 0x6a, 0x9e, 0xba, 0x89, 0x09, 0x1a, 0xc1, 0xa5, 0xf8, 0x8a, 0xb0, 0x46, 0x67, 0x89, + 0xdf, 0x44, 0x1e, 0x4b, 0x1d, 0x9d, 0xc1, 0x7a, 0xfe, 0xcd, 0xdb, 0xca, 0x60, 0xa4, 0x87, 0x68, + 0x78, 0x47, 0x22, 0xbc, 0xc8, 0xa1, 0x03, 0x1d, 0x9d, 0x35, 0xb4, 0x94, 0x22, 0xc9, 0x4d, 0x18, + 0x2d, 0x91, 0xe9, 0x96, 0xe8, 0xa3, 0x4e, 0x2d, 0x6c, 0x04, 0x9e, 0xef, 0x82, 0x1b, 0x78, 0xfe, + 0x04, 0x4c, 0x04, 0x9e, 0x4f, 0x80, 0x04, 0x9e, 0x5f, 0x86, 0x72, 0xe0, 0xf9, 0xcb, 0xb0, 0x1c, + 0x78, 0xfe, 0x0a, 0xac, 0x04, 0x9e, 0xbf, 0x0a, 0xab, 0xf5, 0x77, 0x17, 0xd7, 0xcc, 0xbd, 0xbc, + 0x66, 0xce, 0xdd, 0x35, 0x73, 0xbf, 0x65, 0xcc, 0xfd, 0x91, 0x31, 0xe7, 0x67, 0xc6, 0xdc, 0x8b, + 0x8c, 0xb9, 0xbf, 0x33, 0xe6, 0xfe, 0xc9, 0x98, 0x73, 0x97, 0x31, 0xf7, 0xfb, 0x0d, 0x73, 0x2e, + 0x6e, 0x98, 0x73, 0x79, 0xc3, 0x9c, 0xce, 0x54, 0xf1, 0x2f, 0xbc, 0xfd, 0x17, 0x00, 0x00, 0xff, + 0xff, 0xf2, 0xb4, 0xa0, 0xf2, 0x51, 0x03, 0x00, 0x00, +} + +func (x Error_Type) String() string { + s, ok 
:= Error_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Error) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.Error{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringError(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Error) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Error) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintError(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintError(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintError(dAtA []byte, offset int, v uint64) int { + offset -= sovError(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Error) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovError(uint64(m.Type)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovError(uint64(l)) + } + return n +} + +func sovError(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozError(x uint64) (n int) { + return sovError(uint64((x 
<< 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Error) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Error{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringError(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Error) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Error: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Error: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Error_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthError + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthError + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipError(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthError + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipError(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthError + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupError + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthError + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthError = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowError = 
fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupError = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/error.proto b/vendor/code.cloudfoundry.org/bbs/models/error.proto new file mode 100644 index 00000000..7aa1a9c8 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/error.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = false; +option (gogoproto.goproto_enum_prefix_all) = true; + +message Error { + enum Type { + reserved 1, 2, 10, 20, 22, 23, 25; // previously used and removed values + + UnknownError = 0; + + InvalidRecord = 3; + InvalidRequest = 4; + InvalidResponse = 5; + InvalidProtobufMessage = 6; + InvalidJSON = 7; + FailedToOpenEnvelope = 8; + InvalidStateTransition = 9; + + ResourceConflict = 11; + ResourceExists = 12; + ResourceNotFound = 13; + RouterError = 14; + + ActualLRPCannotBeClaimed = 15; + ActualLRPCannotBeStarted = 16; + ActualLRPCannotBeCrashed = 17; + ActualLRPCannotBeFailed = 18; + ActualLRPCannotBeRemoved = 19; + ActualLRPCannotBeUnclaimed = 21; + + RunningOnDifferentCell = 24; + + GUIDGeneration = 26; + + Deserialize = 27; + + Deadlock = 28; + Unrecoverable = 29; + + LockCollision = 30; + + Timeout = 31; + } + + Type type = 1 [(gogoproto.jsontag) = "type"]; + string message = 2 [(gogoproto.jsontag) = "message"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/errors.go b/vendor/code.cloudfoundry.org/bbs/models/errors.go new file mode 100644 index 00000000..fec8e3f7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/errors.go @@ -0,0 +1,186 @@ +package models + +import ( + "encoding/json" + "errors" + "fmt" +) + +func NewError(errType Error_Type, msg string) *Error { + return &Error{ + Type: errType, + Message: msg, + } +} + +func ConvertError(err error) *Error { + if err == nil { + return nil + } + + modelErr, ok := err.(*Error) + if !ok { + 
modelErr = NewError(Error_UnknownError, err.Error()) + } + return modelErr +} + +func (err *Error) ToError() error { + if err == nil { + return nil + } + return err +} + +func (err *Error) Equal(other error) bool { + if e, ok := other.(*Error); ok { + if err == nil && e != nil { + return false + } + return e.GetType() == err.GetType() + } + return false +} + +func (err *Error) Error() string { + return err.GetMessage() +} + +func (d *Error_Type) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := Error_Type_value[name]; found { + *d = Error_Type(v) + return nil + } + return fmt.Errorf("invalid presence: %s", name) +} + +func (d Error_Type) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +var ( + ErrResourceNotFound = &Error{ + Type: Error_ResourceNotFound, + Message: "the requested resource could not be found", + } + + ErrResourceExists = &Error{ + Type: Error_ResourceExists, + Message: "the requested resource already exists", + } + + ErrResourceConflict = &Error{ + Type: Error_ResourceConflict, + Message: "the requested resource is in a conflicting state", + } + + ErrDeadlock = &Error{ + Type: Error_Deadlock, + Message: "the request failed due to deadlock", + } + + ErrBadRequest = &Error{ + Type: Error_InvalidRequest, + Message: "the request received is invalid", + } + + ErrUnknownError = &Error{ + Type: Error_UnknownError, + Message: "the request failed for an unknown reason", + } + + ErrDeserialize = &Error{ + Type: Error_Deserialize, + Message: "could not deserialize record", + } + + ErrFailedToOpenEnvelope = &Error{ + Type: Error_FailedToOpenEnvelope, + Message: "could not open envelope", + } + + ErrActualLRPCannotBeClaimed = &Error{ + Type: Error_ActualLRPCannotBeClaimed, + Message: "cannot claim actual LRP", + } + + ErrActualLRPCannotBeStarted = &Error{ + Type: Error_ActualLRPCannotBeStarted, + Message: "cannot start actual LRP", + } + + 
ErrActualLRPCannotBeCrashed = &Error{ + Type: Error_ActualLRPCannotBeCrashed, + Message: "cannot crash actual LRP", + } + + ErrActualLRPCannotBeFailed = &Error{ + Type: Error_ActualLRPCannotBeFailed, + Message: "cannot fail actual LRP", + } + + ErrActualLRPCannotBeRemoved = &Error{ + Type: Error_ActualLRPCannotBeRemoved, + Message: "cannot remove actual LRP", + } + + ErrActualLRPCannotBeUnclaimed = &Error{ + Type: Error_ActualLRPCannotBeUnclaimed, + Message: "cannot unclaim actual LRP", + } + + ErrGUIDGeneration = &Error{ + Type: Error_GUIDGeneration, + Message: "cannot generate random guid", + } + + ErrLockCollision = &Error{ + Type: Error_LockCollision, + Message: "lock already exists", + } +) + +type ErrInvalidField struct { + Field string +} + +func (err ErrInvalidField) Error() string { + return "Invalid field: " + err.Field +} + +type ErrInvalidModification struct { + InvalidField string +} + +func (err ErrInvalidModification) Error() string { + return "attempt to make invalid change to field: " + err.InvalidField +} + +// Deprecated: use the ActualLRPInstance API instead +var ErrActualLRPGroupInvalid = errors.New("ActualLRPGroup invalid") + +func NewTaskTransitionError(from, to Task_State) *Error { + return &Error{ + Type: Error_InvalidStateTransition, + Message: fmt.Sprintf("Cannot transition from %s to %s", from.String(), to.String()), + } +} + +func NewRunningOnDifferentCellError(expectedCellId, actualCellId string) *Error { + return &Error{ + Type: Error_RunningOnDifferentCell, + Message: fmt.Sprintf("Running on cell %s not %s", actualCellId, expectedCellId), + } +} + +func NewUnrecoverableError(err error) *Error { + return &Error{ + Type: Error_Unrecoverable, + Message: fmt.Sprint("Unrecoverable Error: ", err), + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/evacuation.go b/vendor/code.cloudfoundry.org/bbs/models/evacuation.go new file mode 100644 index 00000000..82aa828e --- /dev/null +++ 
b/vendor/code.cloudfoundry.org/bbs/models/evacuation.go @@ -0,0 +1,12 @@ +package models + +func (request *EvacuateRunningActualLRPRequest) SetRoutable(routable bool) { + request.OptionalRoutable = &EvacuateRunningActualLRPRequest_Routable{ + Routable: routable, + } +} + +func (request *EvacuateRunningActualLRPRequest) RoutableExists() bool { + _, ok := request.GetOptionalRoutable().(*EvacuateRunningActualLRPRequest_Routable) + return ok +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go b/vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go new file mode 100644 index 00000000..b759f031 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go @@ -0,0 +1,2503 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: evacuation.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EvacuationResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + KeepContainer bool `protobuf:"varint,2,opt,name=keep_container,json=keepContainer,proto3" json:"keep_container"` +} + +func (m *EvacuationResponse) Reset() { *m = EvacuationResponse{} } +func (*EvacuationResponse) ProtoMessage() {} +func (*EvacuationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{0} +} +func (m *EvacuationResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuationResponse.Merge(m, src) +} +func (m *EvacuationResponse) XXX_Size() int { + return m.Size() +} +func (m *EvacuationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuationResponse proto.InternalMessageInfo + +func (m *EvacuationResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *EvacuationResponse) GetKeepContainer() bool { + if m != nil { + return m.KeepContainer + } + return false +} + +type EvacuateClaimedActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *EvacuateClaimedActualLRPRequest) Reset() { *m = EvacuateClaimedActualLRPRequest{} } +func 
(*EvacuateClaimedActualLRPRequest) ProtoMessage() {} +func (*EvacuateClaimedActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{1} +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateClaimedActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateClaimedActualLRPRequest.Merge(m, src) +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateClaimedActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateClaimedActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateClaimedActualLRPRequest proto.InternalMessageInfo + +func (m *EvacuateClaimedActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateClaimedActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type EvacuateRunningActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ActualLrpNetInfo *ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3" json:"actual_lrp_net_info,omitempty"` + ActualLrpInternalRoutes []*ActualLRPInternalRoute 
`protobuf:"bytes,5,rep,name=actual_lrp_internal_routes,json=actualLrpInternalRoutes,proto3" json:"actual_lrp_internal_routes,omitempty"` + MetricTags map[string]string `protobuf:"bytes,6,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to OptionalRoutable: + // *EvacuateRunningActualLRPRequest_Routable + OptionalRoutable isEvacuateRunningActualLRPRequest_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,8,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *EvacuateRunningActualLRPRequest) Reset() { *m = EvacuateRunningActualLRPRequest{} } +func (*EvacuateRunningActualLRPRequest) ProtoMessage() {} +func (*EvacuateRunningActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{2} +} +func (m *EvacuateRunningActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateRunningActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateRunningActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateRunningActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateRunningActualLRPRequest.Merge(m, src) +} +func (m *EvacuateRunningActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateRunningActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateRunningActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateRunningActualLRPRequest proto.InternalMessageInfo + +type isEvacuateRunningActualLRPRequest_OptionalRoutable interface { + isEvacuateRunningActualLRPRequest_OptionalRoutable() + 
MarshalTo([]byte) (int, error) + Size() int +} + +type EvacuateRunningActualLRPRequest_Routable struct { + Routable bool `protobuf:"varint,7,opt,name=Routable,proto3,oneof" json:"Routable,omitempty"` +} + +func (*EvacuateRunningActualLRPRequest_Routable) isEvacuateRunningActualLRPRequest_OptionalRoutable() { +} + +func (m *EvacuateRunningActualLRPRequest) GetOptionalRoutable() isEvacuateRunningActualLRPRequest_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpNetInfo() *ActualLRPNetInfo { + if m != nil { + return m.ActualLrpNetInfo + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpInternalRoutes() []*ActualLRPInternalRoute { + if m != nil { + return m.ActualLrpInternalRoutes + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetMetricTags() map[string]string { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*EvacuateRunningActualLRPRequest_Routable); ok { + return x.Routable + } + return false +} + +func (m *EvacuateRunningActualLRPRequest) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*EvacuateRunningActualLRPRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*EvacuateRunningActualLRPRequest_Routable)(nil), + } +} + +type EvacuateStoppedActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *EvacuateStoppedActualLRPRequest) Reset() { *m = EvacuateStoppedActualLRPRequest{} } +func (*EvacuateStoppedActualLRPRequest) ProtoMessage() {} +func (*EvacuateStoppedActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{3} +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateStoppedActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateStoppedActualLRPRequest.Merge(m, src) +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateStoppedActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateStoppedActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateStoppedActualLRPRequest proto.InternalMessageInfo + +func (m *EvacuateStoppedActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateStoppedActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type 
EvacuateCrashedActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message"` +} + +func (m *EvacuateCrashedActualLRPRequest) Reset() { *m = EvacuateCrashedActualLRPRequest{} } +func (*EvacuateCrashedActualLRPRequest) ProtoMessage() {} +func (*EvacuateCrashedActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{4} +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateCrashedActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateCrashedActualLRPRequest.Merge(m, src) +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateCrashedActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateCrashedActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateCrashedActualLRPRequest proto.InternalMessageInfo + +func (m *EvacuateCrashedActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateCrashedActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *EvacuateCrashedActualLRPRequest) GetErrorMessage() string { 
+ if m != nil { + return m.ErrorMessage + } + return "" +} + +type RemoveEvacuatingActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *RemoveEvacuatingActualLRPRequest) Reset() { *m = RemoveEvacuatingActualLRPRequest{} } +func (*RemoveEvacuatingActualLRPRequest) ProtoMessage() {} +func (*RemoveEvacuatingActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{5} +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveEvacuatingActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveEvacuatingActualLRPRequest.Merge(m, src) +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveEvacuatingActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveEvacuatingActualLRPRequest proto.InternalMessageInfo + +func (m *RemoveEvacuatingActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *RemoveEvacuatingActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type RemoveEvacuatingActualLRPResponse struct { + Error *Error 
`protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *RemoveEvacuatingActualLRPResponse) Reset() { *m = RemoveEvacuatingActualLRPResponse{} } +func (*RemoveEvacuatingActualLRPResponse) ProtoMessage() {} +func (*RemoveEvacuatingActualLRPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{6} +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveEvacuatingActualLRPResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveEvacuatingActualLRPResponse.Merge(m, src) +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Size() int { + return m.Size() +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveEvacuatingActualLRPResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveEvacuatingActualLRPResponse proto.InternalMessageInfo + +func (m *RemoveEvacuatingActualLRPResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func init() { + proto.RegisterType((*EvacuationResponse)(nil), "models.EvacuationResponse") + proto.RegisterType((*EvacuateClaimedActualLRPRequest)(nil), "models.EvacuateClaimedActualLRPRequest") + proto.RegisterType((*EvacuateRunningActualLRPRequest)(nil), "models.EvacuateRunningActualLRPRequest") + proto.RegisterMapType((map[string]string)(nil), "models.EvacuateRunningActualLRPRequest.MetricTagsEntry") + proto.RegisterType((*EvacuateStoppedActualLRPRequest)(nil), "models.EvacuateStoppedActualLRPRequest") + proto.RegisterType((*EvacuateCrashedActualLRPRequest)(nil), "models.EvacuateCrashedActualLRPRequest") 
+ proto.RegisterType((*RemoveEvacuatingActualLRPRequest)(nil), "models.RemoveEvacuatingActualLRPRequest") + proto.RegisterType((*RemoveEvacuatingActualLRPResponse)(nil), "models.RemoveEvacuatingActualLRPResponse") +} + +func init() { proto.RegisterFile("evacuation.proto", fileDescriptor_5cec7f656fd69c9d) } + +var fileDescriptor_5cec7f656fd69c9d = []byte{ + // 628 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0xf5, 0x00, 0xe1, 0x4b, 0x26, 0xc0, 0x17, 0x06, 0x2a, 0xac, 0x08, 0x4d, 0xd2, 0x74, 0x93, + 0x4d, 0x83, 0x44, 0xab, 0xfe, 0x20, 0x75, 0xd1, 0x20, 0x54, 0x28, 0x50, 0x55, 0x43, 0x17, 0x55, + 0xbb, 0xb0, 0x26, 0xe1, 0x62, 0x2c, 0xec, 0x19, 0xd7, 0x1e, 0x47, 0x4a, 0x57, 0x7d, 0x84, 0x3e, + 0x46, 0xd7, 0xed, 0x4b, 0x74, 0xc9, 0x92, 0x15, 0x2a, 0x66, 0x53, 0x65, 0x51, 0xf1, 0x08, 0x95, + 0xc7, 0x89, 0x31, 0x50, 0x21, 0x75, 0xd7, 0xec, 0xe6, 0x9c, 0x7b, 0xef, 0xb9, 0x47, 0x33, 0x73, + 0x67, 0x70, 0x05, 0x7a, 0xbc, 0x1b, 0x71, 0xe5, 0x48, 0xd1, 0xf2, 0x03, 0xa9, 0x24, 0x99, 0xf6, + 0xe4, 0x3e, 0xb8, 0x61, 0xf5, 0xbe, 0xed, 0xa8, 0xc3, 0xa8, 0xd3, 0xea, 0x4a, 0x6f, 0xc5, 0x96, + 0xb6, 0x5c, 0xd1, 0xe1, 0x4e, 0x74, 0xa0, 0x91, 0x06, 0x7a, 0x95, 0x96, 0x55, 0x2b, 0xbc, 0xab, + 0x22, 0xee, 0x5a, 0x6e, 0xe0, 0x0f, 0x99, 0x32, 0x04, 0x81, 0x0c, 0x52, 0xd0, 0x50, 0x98, 0x6c, + 0x64, 0x9d, 0x18, 0x84, 0xbe, 0x14, 0x21, 0x90, 0x7b, 0xb8, 0xa0, 0x93, 0x4c, 0x54, 0x47, 0xcd, + 0xf2, 0xea, 0x6c, 0x2b, 0xed, 0xdd, 0xda, 0x48, 0x48, 0x96, 0xc6, 0xc8, 0x53, 0x3c, 0x77, 0x04, + 0xe0, 0x5b, 0x5d, 0x29, 0x14, 0x77, 0x04, 0x04, 0xe6, 0x44, 0x1d, 0x35, 0x8b, 0x6d, 0x32, 0x38, + 0xad, 0x5d, 0x8b, 0xb0, 0xd9, 0x04, 0xaf, 0x8f, 0x60, 0xe3, 0x2b, 0xc2, 0xb5, 0x61, 0x5b, 0x58, + 0x77, 0xb9, 0xe3, 0xc1, 0xfe, 0x73, 0x6d, 0x73, 0x87, 0xbd, 0x66, 0xf0, 0x21, 0x82, 0x50, 0x91, + 0x35, 0x3c, 0x77, 0x69, 0xdd, 0x3a, 0x82, 0xfe, 0xd0, 0xcc, 0xe2, 0xc8, 0x4c, 0x56, 0xb1, 0x0d, + 0x7d, 0x36, 0x93, 0xe6, 
0xee, 0x04, 0xfe, 0x36, 0xf4, 0xc9, 0x1e, 0x5e, 0xca, 0xd5, 0x3a, 0x22, + 0x54, 0x5c, 0x74, 0x41, 0x8b, 0x4c, 0x68, 0x91, 0xe5, 0x1b, 0x22, 0x5b, 0xc3, 0xa4, 0x44, 0x6c, + 0x31, 0x13, 0xcb, 0xb1, 0x8d, 0x5f, 0x53, 0x97, 0xa6, 0x59, 0x24, 0x84, 0x23, 0xec, 0x7f, 0xde, + 0x34, 0x79, 0x81, 0x17, 0x72, 0xa2, 0x02, 0x94, 0xe5, 0x88, 0x03, 0x69, 0x4e, 0x6a, 0x41, 0xf3, + 0x86, 0xe0, 0x2b, 0x50, 0x5b, 0xe2, 0x40, 0xb2, 0x4a, 0x26, 0x36, 0x64, 0xc8, 0x7b, 0x5c, 0xbd, + 0xe2, 0x4e, 0x41, 0x20, 0xb8, 0x6b, 0x05, 0x32, 0x52, 0x10, 0x9a, 0x85, 0xfa, 0x64, 0xb3, 0xbc, + 0x4a, 0xff, 0x60, 0x30, 0xcd, 0x63, 0x49, 0x1a, 0x5b, 0xca, 0x59, 0xcc, 0xf1, 0x21, 0x79, 0x8b, + 0xcb, 0x1e, 0xa8, 0xc0, 0xe9, 0x5a, 0x8a, 0xdb, 0xa1, 0x39, 0xad, 0xd5, 0x1e, 0x67, 0xb7, 0xee, + 0xf6, 0x4d, 0x6f, 0xed, 0xea, 0xd2, 0x37, 0xdc, 0x0e, 0x37, 0x84, 0x0a, 0xfa, 0x0c, 0x7b, 0x19, + 0x41, 0x96, 0x71, 0x31, 0xe9, 0xc1, 0x3b, 0x2e, 0x98, 0xff, 0x25, 0xd7, 0x73, 0xd3, 0x60, 0x19, + 0x43, 0xda, 0x78, 0x9e, 0xf7, 0xb8, 0xe3, 0xf2, 0x8e, 0xe3, 0x3a, 0xaa, 0x6f, 0x7d, 0x94, 0x02, + 0xcc, 0x62, 0x1d, 0x35, 0x4b, 0xed, 0x3b, 0x83, 0xd3, 0xda, 0xcd, 0x20, 0xab, 0xe4, 0xa9, 0x77, + 0x52, 0x40, 0xf5, 0x19, 0xfe, 0xff, 0x9a, 0x01, 0x52, 0xc1, 0x93, 0xa3, 0xa3, 0x2f, 0xb1, 0x64, + 0x49, 0x16, 0x71, 0xa1, 0xc7, 0xdd, 0x08, 0xf4, 0x49, 0x96, 0x58, 0x0a, 0xd6, 0x26, 0x9e, 0xa0, + 0xf6, 0x02, 0x9e, 0x97, 0x7e, 0x32, 0x7c, 0xc3, 0xcd, 0x4c, 0x7c, 0xbd, 0x9c, 0x2a, 0x4e, 0x55, + 0x0a, 0x57, 0xa6, 0x64, 0x4f, 0x49, 0xdf, 0x1f, 0x87, 0x29, 0x19, 0xe4, 0x47, 0x3b, 0xe0, 0xe1, + 0xe1, 0x18, 0x98, 0x26, 0x8f, 0xf0, 0xac, 0x7e, 0xd3, 0x2c, 0x0f, 0xc2, 0x90, 0xdb, 0xa0, 0xe7, + 0xa3, 0xd4, 0x9e, 0x1f, 0x9c, 0xd6, 0xae, 0x06, 0xd8, 0x8c, 0x86, 0xbb, 0x29, 0x6a, 0x7c, 0x43, + 0xb8, 0xce, 0xc0, 0x93, 0x3d, 0x18, 0x3d, 0xa2, 0x63, 0xf0, 0x26, 0x34, 0x36, 0xf1, 0xdd, 0x5b, + 0x4c, 0xff, 0xc5, 0x17, 0xd0, 0x7e, 0x78, 0x7c, 0x46, 0x8d, 0x93, 0x33, 0x6a, 0x5c, 0x9c, 0x51, + 0xf4, 0x29, 0xa6, 0xe8, 0x4b, 0x4c, 0x8d, 0xef, 0x31, 0x45, 
0xc7, 0x31, 0x45, 0x3f, 0x62, 0x8a, + 0x7e, 0xc6, 0xd4, 0xb8, 0x88, 0x29, 0xfa, 0x7c, 0x4e, 0x8d, 0xe3, 0x73, 0x6a, 0x9c, 0x9c, 0x53, + 0xa3, 0x33, 0xad, 0xbf, 0x9e, 0x07, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x02, 0x9c, 0x2f, 0xec, + 0xe4, 0x06, 0x00, 0x00, +} + +func (this *EvacuationResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EvacuationResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "KeepContainer: "+fmt.Sprintf("%#v", this.KeepContainer)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateClaimedActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EvacuateClaimedActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateRunningActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.EvacuateRunningActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + if this.ActualLrpNetInfo != nil { + s = append(s, "ActualLrpNetInfo: "+fmt.Sprintf("%#v", this.ActualLrpNetInfo)+",\n") + } + if this.ActualLrpInternalRoutes != nil { + s = append(s, "ActualLrpInternalRoutes: "+fmt.Sprintf("%#v", this.ActualLrpInternalRoutes)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = 
append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateRunningActualLRPRequest_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.EvacuateRunningActualLRPRequest_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func (this *EvacuateStoppedActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EvacuateStoppedActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateCrashedActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.EvacuateCrashedActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") 
+} +func (this *RemoveEvacuatingActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.RemoveEvacuatingActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveEvacuatingActualLRPResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.RemoveEvacuatingActualLRPResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEvacuation(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *EvacuationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.KeepContainer { + i-- + if m.KeepContainer { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateClaimedActualLRPRequest) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateClaimedActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateClaimedActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateRunningActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateRunningActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateRunningActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintEvacuation(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x42 + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + 
i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintEvacuation(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintEvacuation(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintEvacuation(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ActualLrpInternalRoutes) > 0 { + for iNdEx := len(m.ActualLrpInternalRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpInternalRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.ActualLrpNetInfo != nil { + { + size, err := m.ActualLrpNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateRunningActualLRPRequest_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateRunningActualLRPRequest_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *EvacuateStoppedActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *EvacuateStoppedActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateStoppedActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateCrashedActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateCrashedActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateCrashedActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintEvacuation(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + 
return len(dAtA) - i, nil +} + +func (m *RemoveEvacuatingActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveEvacuatingActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveEvacuatingActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveEvacuatingActualLRPResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveEvacuatingActualLRPResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveEvacuatingActualLRPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEvacuation(dAtA []byte, offset int, v uint64) int { + offset -= sovEvacuation(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = 
uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EvacuationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.KeepContainer { + n += 2 + } + return n +} + +func (m *EvacuateClaimedActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *EvacuateRunningActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpNetInfo != nil { + l = m.ActualLrpNetInfo.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if len(m.ActualLrpInternalRoutes) > 0 { + for _, e := range m.ActualLrpInternalRoutes { + l = e.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovEvacuation(uint64(len(k))) + 1 + len(v) + sovEvacuation(uint64(len(v))) + n += mapEntrySize + 1 + sovEvacuation(uint64(mapEntrySize)) + } + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *EvacuateRunningActualLRPRequest_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *EvacuateStoppedActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = 
m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *EvacuateCrashedActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *RemoveEvacuatingActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *RemoveEvacuatingActualLRPResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func sovEvacuation(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvacuation(x uint64) (n int) { + return sovEvacuation(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *EvacuationResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuationResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `KeepContainer:` + fmt.Sprintf("%v", this.KeepContainer) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateClaimedActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateClaimedActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + 
`ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateRunningActualLRPRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpInternalRoutes := "[]*ActualLRPInternalRoute{" + for _, f := range this.ActualLrpInternalRoutes { + repeatedStringForActualLrpInternalRoutes += strings.Replace(fmt.Sprintf("%v", f), "ActualLRPInternalRoute", "ActualLRPInternalRoute", 1) + "," + } + repeatedStringForActualLrpInternalRoutes += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&EvacuateRunningActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ActualLrpNetInfo:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpNetInfo), "ActualLRPNetInfo", "ActualLRPNetInfo", 1) + `,`, + `ActualLrpInternalRoutes:` + repeatedStringForActualLrpInternalRoutes + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateRunningActualLRPRequest_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateRunningActualLRPRequest_Routable{`, + `Routable:` + 
fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateStoppedActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateStoppedActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateCrashedActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateCrashedActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveEvacuatingActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveEvacuatingActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveEvacuatingActualLRPResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveEvacuatingActualLRPResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringEvacuation(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", 
pv) +} +func (m *EvacuationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepContainer", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepContainer = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateClaimedActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateClaimedActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateClaimedActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateRunningActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateRunningActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateRunningActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpNetInfo == nil { + m.ActualLrpNetInfo = &ActualLRPNetInfo{} + } + if err := m.ActualLrpNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ActualLrpInternalRoutes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpInternalRoutes = append(m.ActualLrpInternalRoutes, &ActualLRPInternalRoute{}) + if err := m.ActualLrpInternalRoutes[len(m.ActualLrpInternalRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEvacuation + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthEvacuation + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEvacuation + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthEvacuation + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = 
&EvacuateRunningActualLRPRequest_Routable{b} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateStoppedActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateStoppedActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateStoppedActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateCrashedActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateCrashedActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateCrashedActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := 
m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveEvacuatingActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveEvacuatingActualLRPResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvacuation(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvacuation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvacuation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvacuation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvacuation + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvacuation + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvacuation + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvacuation = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvacuation = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvacuation = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/evacuation.proto b/vendor/code.cloudfoundry.org/bbs/models/evacuation.proto new file mode 100644 index 00000000..85f4883d --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/evacuation.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actual_lrp.proto"; +import "error.proto"; + +option (gogoproto.equal_all) = false; + +message EvacuationResponse { + Error error = 1; + bool keep_container = 2 [(gogoproto.jsontag) = "keep_container"]; +} + +message EvacuateClaimedActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; +} + 
+message EvacuateRunningActualLRPRequest { + reserved 4; // previously removed ttl value + + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + ActualLRPNetInfo actual_lrp_net_info = 3; + repeated ActualLRPInternalRoute actual_lrp_internal_routes = 5; + map metric_tags = 6; + oneof optional_routable { + bool Routable = 7; + } + string availability_zone = 8 [(gogoproto.jsontag) = "availability_zone"]; +} + +message EvacuateStoppedActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; +} + +message EvacuateCrashedActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + string error_message = 3 [(gogoproto.jsontag) = "error_message"]; +} + +message RemoveEvacuatingActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; +} + +message RemoveEvacuatingActualLRPResponse { + Error error = 1; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/events.go b/vendor/code.cloudfoundry.org/bbs/models/events.go new file mode 100644 index 00000000..d09fa1f0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/events.go @@ -0,0 +1,329 @@ +package models + +import ( + "code.cloudfoundry.org/bbs/format" + "github.com/gogo/protobuf/proto" +) + +type Event interface { + EventType() string + Key() string + proto.Message +} + +const ( + EventTypeInvalid = "" + + EventTypeDesiredLRPCreated = "desired_lrp_created" + EventTypeDesiredLRPChanged = "desired_lrp_changed" + EventTypeDesiredLRPRemoved = "desired_lrp_removed" + + // Deprecated: use the ActualLRPInstance versions of this instead + EventTypeActualLRPCreated = "actual_lrp_created" + // Deprecated: use the ActualLRPInstance versions of this instead + EventTypeActualLRPChanged = "actual_lrp_changed" + // Deprecated: use the ActualLRPInstance versions of this instead + EventTypeActualLRPRemoved = "actual_lrp_removed" + 
EventTypeActualLRPCrashed = "actual_lrp_crashed" + + EventTypeActualLRPInstanceCreated = "actual_lrp_instance_created" + EventTypeActualLRPInstanceChanged = "actual_lrp_instance_changed" + EventTypeActualLRPInstanceRemoved = "actual_lrp_instance_removed" + + EventTypeTaskCreated = "task_created" + EventTypeTaskChanged = "task_changed" + EventTypeTaskRemoved = "task_removed" +) + +// Downgrade the DesiredLRPEvent payload (i.e. DesiredLRP(s)) to the given +// target version +func VersionDesiredLRPsTo(event Event, target format.Version) Event { + switch event := event.(type) { + case *DesiredLRPCreatedEvent: + return NewDesiredLRPCreatedEvent(event.DesiredLrp.VersionDownTo(target), event.TraceId) + case *DesiredLRPRemovedEvent: + return NewDesiredLRPRemovedEvent(event.DesiredLrp.VersionDownTo(target), event.TraceId) + case *DesiredLRPChangedEvent: + return NewDesiredLRPChangedEvent( + event.Before.VersionDownTo(target), + event.After.VersionDownTo(target), + event.TraceId, + ) + default: + return event + } +} + +// Downgrade the TaskEvent payload (i.e. 
Task(s)) to the given target version +func VersionTaskDefinitionsTo(event Event, target format.Version) Event { + switch event := event.(type) { + case *TaskCreatedEvent: + return NewTaskCreatedEvent(event.Task.VersionDownTo(target)) + case *TaskRemovedEvent: + return NewTaskRemovedEvent(event.Task.VersionDownTo(target)) + case *TaskChangedEvent: + return NewTaskChangedEvent(event.Before.VersionDownTo(target), event.After.VersionDownTo(target)) + default: + return event + } +} + +func NewDesiredLRPCreatedEvent(desiredLRP *DesiredLRP, traceId string) *DesiredLRPCreatedEvent { + return &DesiredLRPCreatedEvent{ + DesiredLrp: desiredLRP, + TraceId: traceId, + } +} + +func (event *DesiredLRPCreatedEvent) EventType() string { + return EventTypeDesiredLRPCreated +} + +func (event *DesiredLRPCreatedEvent) Key() string { + return event.DesiredLrp.GetProcessGuid() +} + +func NewDesiredLRPChangedEvent(before, after *DesiredLRP, traceId string) *DesiredLRPChangedEvent { + return &DesiredLRPChangedEvent{ + Before: before, + After: after, + TraceId: traceId, + } +} + +func (event *DesiredLRPChangedEvent) EventType() string { + return EventTypeDesiredLRPChanged +} + +func (event *DesiredLRPChangedEvent) Key() string { + return event.Before.GetProcessGuid() +} + +func NewDesiredLRPRemovedEvent(desiredLRP *DesiredLRP, traceId string) *DesiredLRPRemovedEvent { + return &DesiredLRPRemovedEvent{ + DesiredLrp: desiredLRP, + TraceId: traceId, + } +} + +func (event *DesiredLRPRemovedEvent) EventType() string { + return EventTypeDesiredLRPRemoved +} + +func (event DesiredLRPRemovedEvent) Key() string { + return event.DesiredLrp.GetProcessGuid() +} + +// FIXME: change the signature +func NewActualLRPInstanceChangedEvent(before, after *ActualLRP, traceId string) *ActualLRPInstanceChangedEvent { + var ( + actualLRPKey ActualLRPKey + actualLRPInstanceKey ActualLRPInstanceKey + ) + + if (before != nil && before.ActualLRPKey != ActualLRPKey{}) { + actualLRPKey = before.ActualLRPKey + } + if 
(after != nil && after.ActualLRPKey != ActualLRPKey{}) { + actualLRPKey = after.ActualLRPKey + } + + if (before != nil && before.ActualLRPInstanceKey != ActualLRPInstanceKey{}) { + actualLRPInstanceKey = before.ActualLRPInstanceKey + } + if (after != nil && after.ActualLRPInstanceKey != ActualLRPInstanceKey{}) { + actualLRPInstanceKey = after.ActualLRPInstanceKey + } + + return &ActualLRPInstanceChangedEvent{ + ActualLRPKey: actualLRPKey, + ActualLRPInstanceKey: actualLRPInstanceKey, + Before: before.ToActualLRPInfo(), + After: after.ToActualLRPInfo(), + TraceId: traceId, + } +} + +func (event *ActualLRPInstanceChangedEvent) EventType() string { + return EventTypeActualLRPInstanceChanged +} + +func (event *ActualLRPInstanceChangedEvent) Key() string { + return event.GetInstanceGuid() +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func NewActualLRPChangedEvent(before, after *ActualLRPGroup) *ActualLRPChangedEvent { + return &ActualLRPChangedEvent{ + Before: before, + After: after, + } +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPChangedEvent) EventType() string { + return EventTypeActualLRPChanged +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPChangedEvent) Key() string { + actualLRP, _, resolveError := event.Before.Resolve() + if resolveError != nil { + return "" + } + return actualLRP.GetInstanceGuid() +} + +func NewActualLRPCrashedEvent(before, after *ActualLRP) *ActualLRPCrashedEvent { + return &ActualLRPCrashedEvent{ + ActualLRPKey: after.ActualLRPKey, + ActualLRPInstanceKey: before.ActualLRPInstanceKey, + CrashCount: after.CrashCount, + CrashReason: after.CrashReason, + Since: after.Since, + } +} + +func (event *ActualLRPCrashedEvent) EventType() string { + return EventTypeActualLRPCrashed +} + +func (event *ActualLRPCrashedEvent) Key() string { + return event.ActualLRPInstanceKey.InstanceGuid +} + +// Deprecated: use the 
ActualLRPInstance versions of this instead +func NewActualLRPRemovedEvent(actualLRPGroup *ActualLRPGroup) *ActualLRPRemovedEvent { + return &ActualLRPRemovedEvent{ + ActualLrpGroup: actualLRPGroup, + } +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPRemovedEvent) EventType() string { + return EventTypeActualLRPRemoved +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPRemovedEvent) Key() string { + actualLRP, _, resolveError := event.ActualLrpGroup.Resolve() + if resolveError != nil { + return "" + } + return actualLRP.GetInstanceGuid() +} + +func NewActualLRPInstanceRemovedEvent(actualLrp *ActualLRP, traceId string) *ActualLRPInstanceRemovedEvent { + return &ActualLRPInstanceRemovedEvent{ + ActualLrp: actualLrp, + TraceId: traceId, + } +} + +func (event *ActualLRPInstanceRemovedEvent) EventType() string { + return EventTypeActualLRPInstanceRemoved +} + +func (event *ActualLRPInstanceRemovedEvent) Key() string { + if event.ActualLrp == nil { + return "" + } + return event.ActualLrp.GetInstanceGuid() +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func NewActualLRPCreatedEvent(actualLRPGroup *ActualLRPGroup) *ActualLRPCreatedEvent { + return &ActualLRPCreatedEvent{ + ActualLrpGroup: actualLRPGroup, + } +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPCreatedEvent) EventType() string { + return EventTypeActualLRPCreated +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPCreatedEvent) Key() string { + actualLRP, _, resolveError := event.ActualLrpGroup.Resolve() + if resolveError != nil { + return "" + } + return actualLRP.GetInstanceGuid() +} + +func NewActualLRPInstanceCreatedEvent(actualLrp *ActualLRP, traceId string) *ActualLRPInstanceCreatedEvent { + return &ActualLRPInstanceCreatedEvent{ + ActualLrp: actualLrp, + TraceId: traceId, + } +} + +func (event 
*ActualLRPInstanceCreatedEvent) EventType() string { + return EventTypeActualLRPInstanceCreated +} + +func (event *ActualLRPInstanceCreatedEvent) Key() string { + if event.ActualLrp == nil { + return "" + } + return event.ActualLrp.GetInstanceGuid() +} + +func (request *EventsByCellId) Validate() error { + return nil +} + +func NewTaskCreatedEvent(task *Task) *TaskCreatedEvent { + return &TaskCreatedEvent{ + Task: task, + } +} + +func (event *TaskCreatedEvent) EventType() string { + return EventTypeTaskCreated +} + +func (event *TaskCreatedEvent) Key() string { + return event.Task.GetTaskGuid() +} + +func NewTaskChangedEvent(before, after *Task) *TaskChangedEvent { + return &TaskChangedEvent{ + Before: before, + After: after, + } +} + +func (event *TaskChangedEvent) EventType() string { + return EventTypeTaskChanged +} + +func (event *TaskChangedEvent) Key() string { + return event.Before.GetTaskGuid() +} + +func NewTaskRemovedEvent(task *Task) *TaskRemovedEvent { + return &TaskRemovedEvent{ + Task: task, + } +} + +func (event *TaskRemovedEvent) EventType() string { + return EventTypeTaskRemoved +} + +func (event TaskRemovedEvent) Key() string { + return event.Task.GetTaskGuid() +} + +func (info *ActualLRPInfo) SetRoutable(routable bool) { + info.OptionalRoutable = &ActualLRPInfo_Routable{ + Routable: routable, + } +} + +func (info *ActualLRPInfo) RoutableExists() bool { + _, ok := info.GetOptionalRoutable().(*ActualLRPInfo_Routable) + return ok +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/events.pb.go b/vendor/code.cloudfoundry.org/bbs/models/events.pb.go new file mode 100644 index 00000000..1716470b --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/events.pb.go @@ -0,0 +1,4977 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: events.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Deprecated: Do not use. +type ActualLRPCreatedEvent struct { + ActualLrpGroup *ActualLRPGroup `protobuf:"bytes,1,opt,name=actual_lrp_group,json=actualLrpGroup,proto3" json:"actual_lrp_group,omitempty"` +} + +func (m *ActualLRPCreatedEvent) Reset() { *m = ActualLRPCreatedEvent{} } +func (*ActualLRPCreatedEvent) ProtoMessage() {} +func (*ActualLRPCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{0} +} +func (m *ActualLRPCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPCreatedEvent.Merge(m, src) +} +func (m *ActualLRPCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPCreatedEvent proto.InternalMessageInfo + +func (m *ActualLRPCreatedEvent) 
GetActualLrpGroup() *ActualLRPGroup { + if m != nil { + return m.ActualLrpGroup + } + return nil +} + +// Deprecated: Do not use. +type ActualLRPChangedEvent struct { + Before *ActualLRPGroup `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *ActualLRPGroup `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` +} + +func (m *ActualLRPChangedEvent) Reset() { *m = ActualLRPChangedEvent{} } +func (*ActualLRPChangedEvent) ProtoMessage() {} +func (*ActualLRPChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{1} +} +func (m *ActualLRPChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPChangedEvent.Merge(m, src) +} +func (m *ActualLRPChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPChangedEvent proto.InternalMessageInfo + +func (m *ActualLRPChangedEvent) GetBefore() *ActualLRPGroup { + if m != nil { + return m.Before + } + return nil +} + +func (m *ActualLRPChangedEvent) GetAfter() *ActualLRPGroup { + if m != nil { + return m.After + } + return nil +} + +// Deprecated: Do not use. 
+type ActualLRPRemovedEvent struct { + ActualLrpGroup *ActualLRPGroup `protobuf:"bytes,1,opt,name=actual_lrp_group,json=actualLrpGroup,proto3" json:"actual_lrp_group,omitempty"` +} + +func (m *ActualLRPRemovedEvent) Reset() { *m = ActualLRPRemovedEvent{} } +func (*ActualLRPRemovedEvent) ProtoMessage() {} +func (*ActualLRPRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{2} +} +func (m *ActualLRPRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPRemovedEvent.Merge(m, src) +} +func (m *ActualLRPRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPRemovedEvent proto.InternalMessageInfo + +func (m *ActualLRPRemovedEvent) GetActualLrpGroup() *ActualLRPGroup { + if m != nil { + return m.ActualLrpGroup + } + return nil +} + +type ActualLRPInstanceCreatedEvent struct { + ActualLrp *ActualLRP `protobuf:"bytes,1,opt,name=actual_lrp,json=actualLrp,proto3" json:"actual_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *ActualLRPInstanceCreatedEvent) Reset() { *m = ActualLRPInstanceCreatedEvent{} } +func (*ActualLRPInstanceCreatedEvent) ProtoMessage() {} +func (*ActualLRPInstanceCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{3} +} +func (m *ActualLRPInstanceCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ActualLRPInstanceCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceCreatedEvent.Merge(m, src) +} +func (m *ActualLRPInstanceCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceCreatedEvent proto.InternalMessageInfo + +func (m *ActualLRPInstanceCreatedEvent) GetActualLrp() *ActualLRP { + if m != nil { + return m.ActualLrp + } + return nil +} + +func (m *ActualLRPInstanceCreatedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type ActualLRPInfo struct { + ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3,embedded=actual_lrp_net_info" json:""` + CrashCount int32 `protobuf:"varint,4,opt,name=crash_count,json=crashCount,proto3" json:"crash_count"` + CrashReason string `protobuf:"bytes,5,opt,name=crash_reason,json=crashReason,proto3" json:"crash_reason,omitempty"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state"` + PlacementError string `protobuf:"bytes,7,opt,name=placement_error,json=placementError,proto3" json:"placement_error,omitempty"` + Since int64 `protobuf:"varint,8,opt,name=since,proto3" json:"since"` + ModificationTag ModificationTag `protobuf:"bytes,9,opt,name=modification_tag,json=modificationTag,proto3" json:"modification_tag"` + Presence ActualLRP_Presence `protobuf:"varint,10,opt,name=presence,proto3,enum=models.ActualLRP_Presence" json:"presence"` + // Types that are valid to be assigned to OptionalRoutable: + // 
*ActualLRPInfo_Routable + OptionalRoutable isActualLRPInfo_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,12,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *ActualLRPInfo) Reset() { *m = ActualLRPInfo{} } +func (*ActualLRPInfo) ProtoMessage() {} +func (*ActualLRPInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{4} +} +func (m *ActualLRPInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInfo.Merge(m, src) +} +func (m *ActualLRPInfo) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInfo proto.InternalMessageInfo + +type isActualLRPInfo_OptionalRoutable interface { + isActualLRPInfo_OptionalRoutable() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ActualLRPInfo_Routable struct { + Routable bool `protobuf:"varint,11,opt,name=Routable,proto3,oneof" json:"Routable,omitempty"` +} + +func (*ActualLRPInfo_Routable) isActualLRPInfo_OptionalRoutable() {} + +func (m *ActualLRPInfo) GetOptionalRoutable() isActualLRPInfo_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *ActualLRPInfo) GetCrashCount() int32 { + if m != nil { + return m.CrashCount + } + return 0 +} + +func (m *ActualLRPInfo) GetCrashReason() string { + if m != nil { + return m.CrashReason + } + return "" +} + +func (m *ActualLRPInfo) GetState() string { + if m != nil { + return m.State 
+ } + return "" +} + +func (m *ActualLRPInfo) GetPlacementError() string { + if m != nil { + return m.PlacementError + } + return "" +} + +func (m *ActualLRPInfo) GetSince() int64 { + if m != nil { + return m.Since + } + return 0 +} + +func (m *ActualLRPInfo) GetModificationTag() ModificationTag { + if m != nil { + return m.ModificationTag + } + return ModificationTag{} +} + +func (m *ActualLRPInfo) GetPresence() ActualLRP_Presence { + if m != nil { + return m.Presence + } + return ActualLRP_Ordinary +} + +func (m *ActualLRPInfo) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*ActualLRPInfo_Routable); ok { + return x.Routable + } + return false +} + +func (m *ActualLRPInfo) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ActualLRPInfo) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ActualLRPInfo_Routable)(nil), + } +} + +type ActualLRPInstanceChangedEvent struct { + ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3,embedded=actual_lrp_key" json:""` + ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3,embedded=actual_lrp_instance_key" json:""` + Before *ActualLRPInfo `protobuf:"bytes,3,opt,name=before,proto3" json:"before,omitempty"` + After *ActualLRPInfo `protobuf:"bytes,4,opt,name=after,proto3" json:"after,omitempty"` + TraceId string `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *ActualLRPInstanceChangedEvent) Reset() { *m = ActualLRPInstanceChangedEvent{} } +func (*ActualLRPInstanceChangedEvent) ProtoMessage() {} +func (*ActualLRPInstanceChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{5} +} +func (m *ActualLRPInstanceChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceChangedEvent) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceChangedEvent.Merge(m, src) +} +func (m *ActualLRPInstanceChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceChangedEvent proto.InternalMessageInfo + +func (m *ActualLRPInstanceChangedEvent) GetBefore() *ActualLRPInfo { + if m != nil { + return m.Before + } + return nil +} + +func (m *ActualLRPInstanceChangedEvent) GetAfter() *ActualLRPInfo { + if m != nil { + return m.After + } + return nil +} + +func (m *ActualLRPInstanceChangedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type ActualLRPInstanceRemovedEvent struct { + ActualLrp *ActualLRP `protobuf:"bytes,1,opt,name=actual_lrp,json=actualLrp,proto3" json:"actual_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *ActualLRPInstanceRemovedEvent) Reset() { *m = ActualLRPInstanceRemovedEvent{} } +func (*ActualLRPInstanceRemovedEvent) ProtoMessage() {} +func (*ActualLRPInstanceRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{6} +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceRemovedEvent.Merge(m, src) +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceRemovedEvent proto.InternalMessageInfo + +func (m *ActualLRPInstanceRemovedEvent) GetActualLrp() *ActualLRP { + if m != nil { + return m.ActualLrp + } + return nil +} + +func (m *ActualLRPInstanceRemovedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type DesiredLRPCreatedEvent struct { + DesiredLrp *DesiredLRP `protobuf:"bytes,1,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *DesiredLRPCreatedEvent) Reset() { *m = DesiredLRPCreatedEvent{} } +func (*DesiredLRPCreatedEvent) ProtoMessage() {} +func (*DesiredLRPCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{7} +} +func (m *DesiredLRPCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPCreatedEvent.Merge(m, src) +} +func (m *DesiredLRPCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPCreatedEvent.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DesiredLRPCreatedEvent proto.InternalMessageInfo + +func (m *DesiredLRPCreatedEvent) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +func (m *DesiredLRPCreatedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type DesiredLRPChangedEvent struct { + Before *DesiredLRP `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *DesiredLRP `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` + TraceId string `protobuf:"bytes,3,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *DesiredLRPChangedEvent) Reset() { *m = DesiredLRPChangedEvent{} } +func (*DesiredLRPChangedEvent) ProtoMessage() {} +func (*DesiredLRPChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{8} +} +func (m *DesiredLRPChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPChangedEvent.Merge(m, src) +} +func (m *DesiredLRPChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPChangedEvent proto.InternalMessageInfo + +func (m *DesiredLRPChangedEvent) GetBefore() *DesiredLRP { + if m != nil { + return m.Before + } + return nil +} + +func (m *DesiredLRPChangedEvent) GetAfter() *DesiredLRP { + if m != nil { + return m.After + } + return nil +} + +func (m *DesiredLRPChangedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" 
+} + +type DesiredLRPRemovedEvent struct { + DesiredLrp *DesiredLRP `protobuf:"bytes,1,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *DesiredLRPRemovedEvent) Reset() { *m = DesiredLRPRemovedEvent{} } +func (*DesiredLRPRemovedEvent) ProtoMessage() {} +func (*DesiredLRPRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{9} +} +func (m *DesiredLRPRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPRemovedEvent.Merge(m, src) +} +func (m *DesiredLRPRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPRemovedEvent proto.InternalMessageInfo + +func (m *DesiredLRPRemovedEvent) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +func (m *DesiredLRPRemovedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type ActualLRPCrashedEvent struct { + ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3,embedded=actual_lrp_key" json:""` + ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3,embedded=actual_lrp_instance_key" json:""` + CrashCount int32 `protobuf:"varint,3,opt,name=crash_count,json=crashCount,proto3" json:"crash_count"` + CrashReason string 
`protobuf:"bytes,4,opt,name=crash_reason,json=crashReason,proto3" json:"crash_reason,omitempty"` + Since int64 `protobuf:"varint,5,opt,name=since,proto3" json:"since"` +} + +func (m *ActualLRPCrashedEvent) Reset() { *m = ActualLRPCrashedEvent{} } +func (*ActualLRPCrashedEvent) ProtoMessage() {} +func (*ActualLRPCrashedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{10} +} +func (m *ActualLRPCrashedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPCrashedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPCrashedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPCrashedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPCrashedEvent.Merge(m, src) +} +func (m *ActualLRPCrashedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPCrashedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPCrashedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPCrashedEvent proto.InternalMessageInfo + +func (m *ActualLRPCrashedEvent) GetCrashCount() int32 { + if m != nil { + return m.CrashCount + } + return 0 +} + +func (m *ActualLRPCrashedEvent) GetCrashReason() string { + if m != nil { + return m.CrashReason + } + return "" +} + +func (m *ActualLRPCrashedEvent) GetSince() int64 { + if m != nil { + return m.Since + } + return 0 +} + +type EventsByCellId struct { + CellId string `protobuf:"bytes,1,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *EventsByCellId) Reset() { *m = EventsByCellId{} } +func (*EventsByCellId) ProtoMessage() {} +func (*EventsByCellId) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{11} +} +func (m *EventsByCellId) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventsByCellId) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventsByCellId.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventsByCellId) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventsByCellId.Merge(m, src) +} +func (m *EventsByCellId) XXX_Size() int { + return m.Size() +} +func (m *EventsByCellId) XXX_DiscardUnknown() { + xxx_messageInfo_EventsByCellId.DiscardUnknown(m) +} + +var xxx_messageInfo_EventsByCellId proto.InternalMessageInfo + +func (m *EventsByCellId) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type TaskCreatedEvent struct { + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *TaskCreatedEvent) Reset() { *m = TaskCreatedEvent{} } +func (*TaskCreatedEvent) ProtoMessage() {} +func (*TaskCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{12} +} +func (m *TaskCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskCreatedEvent.Merge(m, src) +} +func (m *TaskCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *TaskCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TaskCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskCreatedEvent proto.InternalMessageInfo + +func (m *TaskCreatedEvent) GetTask() *Task { + if m != nil { + return m.Task + } + return nil +} + +type TaskChangedEvent struct { + Before *Task 
`protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *Task `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` +} + +func (m *TaskChangedEvent) Reset() { *m = TaskChangedEvent{} } +func (*TaskChangedEvent) ProtoMessage() {} +func (*TaskChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{13} +} +func (m *TaskChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskChangedEvent.Merge(m, src) +} +func (m *TaskChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *TaskChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TaskChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskChangedEvent proto.InternalMessageInfo + +func (m *TaskChangedEvent) GetBefore() *Task { + if m != nil { + return m.Before + } + return nil +} + +func (m *TaskChangedEvent) GetAfter() *Task { + if m != nil { + return m.After + } + return nil +} + +type TaskRemovedEvent struct { + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *TaskRemovedEvent) Reset() { *m = TaskRemovedEvent{} } +func (*TaskRemovedEvent) ProtoMessage() {} +func (*TaskRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{14} +} +func (m *TaskRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskRemovedEvent.Merge(m, src) +} +func (m *TaskRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *TaskRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TaskRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskRemovedEvent proto.InternalMessageInfo + +func (m *TaskRemovedEvent) GetTask() *Task { + if m != nil { + return m.Task + } + return nil +} + +func init() { + proto.RegisterType((*ActualLRPCreatedEvent)(nil), "models.ActualLRPCreatedEvent") + proto.RegisterType((*ActualLRPChangedEvent)(nil), "models.ActualLRPChangedEvent") + proto.RegisterType((*ActualLRPRemovedEvent)(nil), "models.ActualLRPRemovedEvent") + proto.RegisterType((*ActualLRPInstanceCreatedEvent)(nil), "models.ActualLRPInstanceCreatedEvent") + proto.RegisterType((*ActualLRPInfo)(nil), "models.ActualLRPInfo") + proto.RegisterType((*ActualLRPInstanceChangedEvent)(nil), "models.ActualLRPInstanceChangedEvent") + proto.RegisterType((*ActualLRPInstanceRemovedEvent)(nil), "models.ActualLRPInstanceRemovedEvent") + proto.RegisterType((*DesiredLRPCreatedEvent)(nil), "models.DesiredLRPCreatedEvent") + proto.RegisterType((*DesiredLRPChangedEvent)(nil), "models.DesiredLRPChangedEvent") + proto.RegisterType((*DesiredLRPRemovedEvent)(nil), "models.DesiredLRPRemovedEvent") + proto.RegisterType((*ActualLRPCrashedEvent)(nil), "models.ActualLRPCrashedEvent") + proto.RegisterType((*EventsByCellId)(nil), "models.EventsByCellId") + proto.RegisterType((*TaskCreatedEvent)(nil), "models.TaskCreatedEvent") + proto.RegisterType((*TaskChangedEvent)(nil), "models.TaskChangedEvent") + proto.RegisterType((*TaskRemovedEvent)(nil), "models.TaskRemovedEvent") +} + +func init() { proto.RegisterFile("events.proto", fileDescriptor_8f22242cb04491f9) } + +var fileDescriptor_8f22242cb04491f9 = []byte{ + // 913 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x4f, 0x6f, 0xdb, 0x36, + 0x14, 0x17, 0x13, 0xdb, 0x71, 0x9e, 0x3d, 0xc7, 0x61, 0x9b, 0x54, 0x08, 0x3a, 0xc9, 0x33, 0x0a, + 0xd4, 0xd8, 0x56, 0xb7, 0x68, 0x8b, 0x1d, 0x76, 0xda, 0x9c, 0x16, 0x6b, 0xd0, 0x6e, 0x28, 0x88, + 0xee, 0x32, 0x74, 0x10, 0x68, 0x99, 0x76, 0x84, 0xc8, 0xa2, 0x21, 0xd1, 0x01, 0xdc, 0xd3, 0x3e, + 0xc2, 0x6e, 0xfb, 0x0a, 0xfb, 0x0c, 0xbb, 0xed, 0xd6, 0x63, 0x76, 0xeb, 0x49, 0x58, 0x9c, 0xcb, + 0xe0, 0x53, 0x3f, 0xc2, 0x20, 0x52, 0x52, 0x29, 0xdb, 0x48, 0x57, 0x60, 0x39, 0xf4, 0x64, 0xf2, + 0xbd, 0x1f, 0x7f, 0xef, 0x0f, 0x1f, 0x7f, 0x32, 0xd4, 0xd9, 0x29, 0x0b, 0x44, 0xd4, 0x9d, 0x84, + 0x5c, 0x70, 0x5c, 0x19, 0xf3, 0x01, 0xf3, 0xa3, 0x83, 0x3b, 0x23, 0x4f, 0x1c, 0x4f, 0xfb, 0x5d, + 0x97, 0x8f, 0xef, 0x8e, 0xf8, 0x88, 0xdf, 0x95, 0xee, 0xfe, 0x74, 0x28, 0x77, 0x72, 0x23, 0x57, + 0xea, 0xd8, 0x41, 0x93, 0xba, 0x62, 0x4a, 0x7d, 0xc7, 0x0f, 0x27, 0xa9, 0x65, 0x77, 0xc0, 0x22, + 0x2f, 0x64, 0x03, 0xcd, 0x04, 0x82, 0x46, 0x27, 0xe9, 0x7a, 0x7f, 0xcc, 0x07, 0xde, 0xd0, 0x73, + 0xa9, 0xf0, 0x78, 0xe0, 0x08, 0x3a, 0x52, 0xf6, 0xf6, 0xcf, 0xb0, 0xf7, 0xad, 0xa4, 0x7a, 0x46, + 0x9e, 0x1f, 0x86, 0x8c, 0x0a, 0x36, 0x78, 0x9c, 0xe4, 0x87, 0xbf, 0x01, 0x2d, 0x86, 0x33, 0x0a, + 0xf9, 0x74, 0x62, 0xa2, 0x16, 0xea, 0xd4, 0xee, 0xef, 0x77, 0x55, 0xce, 0xdd, 0xfc, 0xe0, 0x77, + 0x89, 0x97, 0x34, 0x14, 0xfe, 0x59, 0x38, 0x91, 0xfb, 0xaf, 0x37, 0x4c, 0xd4, 0x9e, 0xe9, 0xf4, + 0xc7, 0x34, 0x18, 0x65, 0xf4, 0x5d, 0xa8, 0xf4, 0xd9, 0x90, 0x87, 0xec, 0x3d, 0xa4, 0x29, 0x0a, + 0x7f, 0x09, 0x65, 0x3a, 0x14, 0x2c, 0x34, 0x37, 0x2e, 0x85, 0x2b, 0x90, 0x0c, 0xad, 0x57, 0x46, + 0xd8, 0x98, 0x9f, 0xfe, 0xbf, 0x95, 0xbd, 0x82, 0x4f, 0x73, 0xd4, 0x51, 0x10, 0x09, 0x1a, 0xb8, + 0xac, 0xd0, 0xc0, 0x7b, 0x00, 0xef, 0xc2, 0xa4, 0x01, 0x76, 0x57, 0x02, 0x90, 0xed, 0x9c, 0x1b, + 0xdf, 0x86, 0xaa, 0x08, 0xa9, 0xcb, 0x1c, 0x6f, 0x20, 0xcb, 0xdc, 0xee, 0xd5, 0x17, 0xb1, 0x9d, + 0xdb, 0xc8, 0x96, 0x5c, 0x1d, 0x0d, 0xda, 0x7f, 0x96, 0xe0, 
0x13, 0x2d, 0xf8, 0x90, 0xe3, 0x1f, + 0xe1, 0x9a, 0x56, 0x53, 0xc0, 0x84, 0xe3, 0x05, 0x43, 0x6e, 0x6e, 0xca, 0xa8, 0xe6, 0x4a, 0xd4, + 0x1f, 0x98, 0x48, 0x8e, 0xf5, 0xea, 0xaf, 0x63, 0xdb, 0x38, 0x8b, 0x6d, 0xb4, 0x88, 0x6d, 0x83, + 0x34, 0xf3, 0x54, 0x52, 0x3f, 0xbe, 0x07, 0x35, 0x37, 0xa4, 0xd1, 0xb1, 0xe3, 0xf2, 0x69, 0x20, + 0xcc, 0x52, 0x0b, 0x75, 0xca, 0xbd, 0x9d, 0x45, 0x6c, 0xeb, 0x66, 0x02, 0x72, 0x73, 0x98, 0xac, + 0xf1, 0x67, 0x50, 0x57, 0xae, 0x90, 0xd1, 0x88, 0x07, 0x66, 0x39, 0xa9, 0x83, 0x28, 0x38, 0x91, + 0x26, 0x6c, 0x43, 0x39, 0x12, 0x54, 0x30, 0xb3, 0x22, 0x6b, 0xdc, 0x5e, 0xc4, 0xb6, 0x32, 0x10, + 0xf5, 0x83, 0x6f, 0xc3, 0xce, 0xc4, 0xa7, 0x2e, 0x1b, 0xb3, 0x40, 0x38, 0x2c, 0x0c, 0x79, 0x68, + 0x6e, 0x49, 0x9a, 0x46, 0x6e, 0x7e, 0x9c, 0x58, 0x25, 0x93, 0x17, 0xb8, 0xcc, 0xac, 0xb6, 0x50, + 0x67, 0x33, 0x65, 0x4a, 0x0c, 0x44, 0xfd, 0xe0, 0x97, 0xd0, 0x5c, 0x9e, 0x7b, 0x73, 0x5b, 0xf6, + 0xe4, 0x46, 0xd6, 0x93, 0xef, 0x35, 0xff, 0x0b, 0x3a, 0xea, 0x99, 0x49, 0x4b, 0x16, 0xb1, 0xbd, + 0x72, 0x90, 0xec, 0x8c, 0x8b, 0x50, 0xfc, 0x08, 0xaa, 0x93, 0x90, 0x45, 0x2c, 0xc9, 0x00, 0x5a, + 0xa8, 0xd3, 0xb8, 0x7f, 0xb0, 0xd2, 0xe9, 0xee, 0xf3, 0x14, 0xa1, 0xee, 0x32, 0xc3, 0x93, 0x7c, + 0x85, 0x6f, 0x42, 0x95, 0xf0, 0xa9, 0xa0, 0x7d, 0x9f, 0x99, 0xb5, 0x16, 0xea, 0x54, 0x9f, 0x18, + 0x24, 0xb7, 0xe0, 0x1e, 0xec, 0xd2, 0x53, 0xea, 0xf9, 0xb4, 0xef, 0xf9, 0x9e, 0x98, 0x39, 0xaf, + 0x78, 0xc0, 0xcc, 0xba, 0x6c, 0xdc, 0xde, 0x22, 0xb6, 0x57, 0x9d, 0xa4, 0xa9, 0x9b, 0x7e, 0xe2, + 0x01, 0xeb, 0x5d, 0x83, 0x5d, 0x3e, 0x49, 0x92, 0xa6, 0xbe, 0x13, 0xa6, 0xc4, 0xed, 0xbf, 0x36, + 0xd6, 0x0d, 0xb0, 0xfe, 0x44, 0x9f, 0x40, 0x43, 0x9b, 0xa9, 0x13, 0x36, 0x4b, 0x87, 0xf8, 0xfa, + 0x4a, 0x91, 0x4f, 0xd9, 0x6c, 0x69, 0x94, 0xea, 0xf9, 0x28, 0x3d, 0x65, 0x33, 0x4c, 0xe1, 0x86, + 0xc6, 0xe4, 0xa5, 0xc1, 0x24, 0xa5, 0x7a, 0xce, 0x37, 0x57, 0x28, 0xb3, 0x8c, 0x56, 0xa9, 0xaf, + 0xe7, 0xd4, 0x1a, 0x06, 0xdf, 0xc9, 0xf5, 0x44, 0xcd, 0xfc, 0xde, 0x1a, 0xc6, 0x21, 0xcf, 0xe5, + 
0xe4, 0x8b, 0x4c, 0x4e, 0x4a, 0x97, 0xa1, 0x15, 0xa6, 0xf0, 0x2e, 0xcb, 0x97, 0xbd, 0xcb, 0x75, + 0x9a, 0x50, 0x90, 0x9e, 0x2b, 0xd4, 0x84, 0x53, 0xd8, 0x7f, 0xa4, 0xbe, 0x00, 0xcb, 0x4a, 0xfe, + 0x00, 0x6a, 0xda, 0xb7, 0x21, 0x8d, 0x8a, 0xb3, 0xa8, 0xef, 0x0e, 0x11, 0x48, 0x61, 0x1f, 0x14, + 0xf7, 0x37, 0x54, 0x08, 0xac, 0x0f, 0xd0, 0xe7, 0x4b, 0x1a, 0xbf, 0x2e, 0x66, 0x76, 0x21, 0x9d, + 0xa2, 0xbe, 0xaf, 0x83, 0xae, 0xb9, 0x8d, 0xcd, 0xff, 0xdc, 0x91, 0xc2, 0x35, 0x5c, 0x6d, 0x47, + 0xfe, 0xd8, 0x28, 0x7c, 0x53, 0x69, 0x74, 0xfc, 0x51, 0xbe, 0xa8, 0x25, 0xed, 0xdf, 0xfc, 0x70, + 0xed, 0x2f, 0xad, 0xd7, 0x7e, 0xa9, 0xd8, 0xe5, 0xf5, 0x8a, 0xdd, 0xfe, 0x0a, 0x1a, 0xb2, 0x57, + 0x51, 0x6f, 0x76, 0xc8, 0x7c, 0xff, 0x68, 0x80, 0x6f, 0xc1, 0x96, 0xcb, 0x7c, 0x3f, 0x69, 0x3b, + 0x92, 0x6d, 0xaf, 0x2d, 0x62, 0x3b, 0x33, 0x91, 0x8a, 0x2b, 0x51, 0xed, 0x87, 0xd0, 0x7c, 0x41, + 0xa3, 0x93, 0xc2, 0xe0, 0xb7, 0xa0, 0x94, 0xfc, 0x03, 0x4a, 0x9b, 0x5c, 0xcf, 0x3a, 0x92, 0xe0, + 0x88, 0xf4, 0xb4, 0x5f, 0xa6, 0xa7, 0xf4, 0xa9, 0xbd, 0xb5, 0x34, 0xb5, 0xc5, 0x73, 0xd9, 0xbc, + 0xb6, 0x8b, 0xf3, 0x5a, 0x04, 0x29, 0x57, 0x96, 0x53, 0x61, 0xf4, 0xde, 0x9b, 0x53, 0xef, 0xe1, + 0xd9, 0xb9, 0x65, 0xbc, 0x39, 0xb7, 0x8c, 0xb7, 0xe7, 0x16, 0xfa, 0x65, 0x6e, 0xa1, 0xdf, 0xe7, + 0x16, 0x7a, 0x3d, 0xb7, 0xd0, 0xd9, 0xdc, 0x42, 0x7f, 0xcf, 0x2d, 0xf4, 0xcf, 0xdc, 0x32, 0xde, + 0xce, 0x2d, 0xf4, 0xeb, 0x85, 0x65, 0x9c, 0x5d, 0x58, 0xc6, 0x9b, 0x0b, 0xcb, 0xe8, 0x57, 0xe4, + 0xdf, 0xb9, 0x07, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x15, 0xfc, 0xcc, 0x44, 0x5e, 0x0a, 0x00, + 0x00, +} + +func (this *ActualLRPCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPCreatedEvent) + if !ok { + that2, ok := that.(ActualLRPCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpGroup.Equal(that1.ActualLrpGroup) { + 
return false + } + return true +} +func (this *ActualLRPChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPChangedEvent) + if !ok { + that2, ok := that.(ActualLRPChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + return true +} +func (this *ActualLRPRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPRemovedEvent) + if !ok { + that2, ok := that.(ActualLRPRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpGroup.Equal(that1.ActualLrpGroup) { + return false + } + return true +} +func (this *ActualLRPInstanceCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInstanceCreatedEvent) + if !ok { + that2, ok := that.(ActualLRPInstanceCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrp.Equal(that1.ActualLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *ActualLRPInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInfo) + if !ok { + that2, ok := that.(ActualLRPInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPNetInfo.Equal(&that1.ActualLRPNetInfo) { + return false + } + if this.CrashCount != that1.CrashCount { + return false + } + if 
this.CrashReason != that1.CrashReason { + return false + } + if this.State != that1.State { + return false + } + if this.PlacementError != that1.PlacementError { + return false + } + if this.Since != that1.Since { + return false + } + if !this.ModificationTag.Equal(&that1.ModificationTag) { + return false + } + if this.Presence != that1.Presence { + return false + } + if that1.OptionalRoutable == nil { + if this.OptionalRoutable != nil { + return false + } + } else if this.OptionalRoutable == nil { + return false + } else if !this.OptionalRoutable.Equal(that1.OptionalRoutable) { + return false + } + if this.AvailabilityZone != that1.AvailabilityZone { + return false + } + return true +} +func (this *ActualLRPInfo_Routable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInfo_Routable) + if !ok { + that2, ok := that.(ActualLRPInfo_Routable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Routable != that1.Routable { + return false + } + return true +} +func (this *ActualLRPInstanceChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInstanceChangedEvent) + if !ok { + that2, ok := that.(ActualLRPInstanceChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPKey.Equal(&that1.ActualLRPKey) { + return false + } + if !this.ActualLRPInstanceKey.Equal(&that1.ActualLRPInstanceKey) { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *ActualLRPInstanceRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok 
:= that.(*ActualLRPInstanceRemovedEvent) + if !ok { + that2, ok := that.(ActualLRPInstanceRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrp.Equal(that1.ActualLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *DesiredLRPCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPCreatedEvent) + if !ok { + that2, ok := that.(DesiredLRPCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *DesiredLRPChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPChangedEvent) + if !ok { + that2, ok := that.(DesiredLRPChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *DesiredLRPRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPRemovedEvent) + if !ok { + that2, ok := that.(DesiredLRPRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *ActualLRPCrashedEvent) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPCrashedEvent) + if !ok { + that2, ok := that.(ActualLRPCrashedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPKey.Equal(&that1.ActualLRPKey) { + return false + } + if !this.ActualLRPInstanceKey.Equal(&that1.ActualLRPInstanceKey) { + return false + } + if this.CrashCount != that1.CrashCount { + return false + } + if this.CrashReason != that1.CrashReason { + return false + } + if this.Since != that1.Since { + return false + } + return true +} +func (this *EventsByCellId) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EventsByCellId) + if !ok { + that2, ok := that.(EventsByCellId) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *TaskCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskCreatedEvent) + if !ok { + that2, ok := that.(TaskCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *TaskChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskChangedEvent) + if !ok { + that2, ok := that.(TaskChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + return true +} +func (this 
*TaskRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskRemovedEvent) + if !ok { + that2, ok := that.(TaskRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *ActualLRPCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPCreatedEvent{") + if this.ActualLrpGroup != nil { + s = append(s, "ActualLrpGroup: "+fmt.Sprintf("%#v", this.ActualLrpGroup)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPChangedEvent{") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPRemovedEvent{") + if this.ActualLrpGroup != nil { + s = append(s, "ActualLrpGroup: "+fmt.Sprintf("%#v", this.ActualLrpGroup)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInstanceCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPInstanceCreatedEvent{") + if this.ActualLrp != nil { + s = append(s, "ActualLrp: "+fmt.Sprintf("%#v", this.ActualLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInfo) GoString() string { + if this == nil { + return 
"nil" + } + s := make([]string, 0, 14) + s = append(s, "&models.ActualLRPInfo{") + s = append(s, "ActualLRPNetInfo: "+strings.Replace(this.ActualLRPNetInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CrashCount: "+fmt.Sprintf("%#v", this.CrashCount)+",\n") + s = append(s, "CrashReason: "+fmt.Sprintf("%#v", this.CrashReason)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "PlacementError: "+fmt.Sprintf("%#v", this.PlacementError)+",\n") + s = append(s, "Since: "+fmt.Sprintf("%#v", this.Since)+",\n") + s = append(s, "ModificationTag: "+strings.Replace(this.ModificationTag.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Presence: "+fmt.Sprintf("%#v", this.Presence)+",\n") + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInfo_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ActualLRPInfo_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func (this *ActualLRPInstanceChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.ActualLRPInstanceChangedEvent{") + s = append(s, "ActualLRPKey: "+strings.Replace(this.ActualLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPInstanceKey: "+strings.Replace(this.ActualLRPInstanceKey.GoString(), `&`, ``, 1)+",\n") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInstanceRemovedEvent) GoString() string { + 
if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPInstanceRemovedEvent{") + if this.ActualLrp != nil { + s = append(s, "ActualLrp: "+fmt.Sprintf("%#v", this.ActualLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPCreatedEvent{") + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.DesiredLRPChangedEvent{") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPRemovedEvent{") + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPCrashedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.ActualLRPCrashedEvent{") + s = append(s, "ActualLRPKey: "+strings.Replace(this.ActualLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPInstanceKey: 
"+strings.Replace(this.ActualLRPInstanceKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CrashCount: "+fmt.Sprintf("%#v", this.CrashCount)+",\n") + s = append(s, "CrashReason: "+fmt.Sprintf("%#v", this.CrashReason)+",\n") + s = append(s, "Since: "+fmt.Sprintf("%#v", this.Since)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EventsByCellId) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.EventsByCellId{") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskCreatedEvent{") + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TaskChangedEvent{") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskRemovedEvent{") + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEvents(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ActualLRPCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpGroup != nil { + { + size, err := m.ActualLrpGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPRemovedEvent) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpGroup != nil { + { + size, err := m.ActualLrpGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInstanceCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.ActualLrp != nil { + { + size, err := m.ActualLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintEvents(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x62 + } + if m.OptionalRoutable != nil { + { + 
size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Presence != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Presence)) + i-- + dAtA[i] = 0x50 + } + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if m.Since != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x40 + } + if len(m.PlacementError) > 0 { + i -= len(m.PlacementError) + copy(dAtA[i:], m.PlacementError) + i = encodeVarintEvents(dAtA, i, uint64(len(m.PlacementError))) + i-- + dAtA[i] = 0x3a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintEvents(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.CrashReason) > 0 { + i -= len(m.CrashReason) + copy(dAtA[i:], m.CrashReason) + i = encodeVarintEvents(dAtA, i, uint64(len(m.CrashReason))) + i-- + dAtA[i] = 0x2a + } + if m.CrashCount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.CrashCount)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.ActualLRPNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} + +func (m *ActualLRPInfo_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInfo_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + return len(dAtA) - i, nil +} +func (m *ActualLRPInstanceChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *ActualLRPInstanceChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x2a + } + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ActualLRPInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ActualLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ActualLRPInstanceRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.ActualLrp != nil { + { + 
size, err := m.ActualLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x1a + } + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPCrashedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPCrashedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPCrashedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Since != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x28 + } + if len(m.CrashReason) > 0 { + i -= len(m.CrashReason) + copy(dAtA[i:], m.CrashReason) + i = encodeVarintEvents(dAtA, i, uint64(len(m.CrashReason))) + i-- + dAtA[i] = 0x22 + } + if m.CrashCount != 0 { + i = encodeVarintEvents(dAtA, 
i, uint64(m.CrashCount)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.ActualLRPInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ActualLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventsByCellId) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventsByCellId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventsByCellId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ActualLRPCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpGroup != nil { + l = m.ActualLrpGroup.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m 
*ActualLRPChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpGroup != nil { + l = m.ActualLrpGroup.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInstanceCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrp != nil { + l = m.ActualLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPNetInfo.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.CrashCount != 0 { + n += 1 + sovEvents(uint64(m.CrashCount)) + } + l = len(m.CrashReason) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.PlacementError) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Since != 0 { + n += 1 + sovEvents(uint64(m.Since)) + } + l = m.ModificationTag.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.Presence != 0 { + n += 1 + sovEvents(uint64(m.Presence)) + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInfo_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ActualLRPInstanceChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPKey.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ActualLRPInstanceKey.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.Before 
!= nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInstanceRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrp != nil { + l = m.ActualLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *DesiredLRPCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *DesiredLRPChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *DesiredLRPRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPCrashedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPKey.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ActualLRPInstanceKey.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.CrashCount != 0 { + n += 1 + sovEvents(uint64(m.CrashCount)) + } + l = len(m.CrashReason) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Since != 0 { + n += 1 + sovEvents(uint64(m.Since)) + } + return n +} + +func (m *EventsByCellId) Size() (n int) { + if m == nil { + return 0 
+ } + var l int + _ = l + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *TaskCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *TaskChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *TaskRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ActualLRPCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPCreatedEvent{`, + `ActualLrpGroup:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpGroup), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPChangedEvent{`, + `Before:` + strings.Replace(fmt.Sprintf("%v", this.Before), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `After:` + strings.Replace(fmt.Sprintf("%v", this.After), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPRemovedEvent{`, + `ActualLrpGroup:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpGroup), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ActualLRPInstanceCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceCreatedEvent{`, + `ActualLrp:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrp), "ActualLRP", "ActualLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInfo{`, + `ActualLRPNetInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPNetInfo), "ActualLRPNetInfo", "ActualLRPNetInfo", 1), `&`, ``, 1) + `,`, + `CrashCount:` + fmt.Sprintf("%v", this.CrashCount) + `,`, + `CrashReason:` + fmt.Sprintf("%v", this.CrashReason) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `PlacementError:` + fmt.Sprintf("%v", this.PlacementError) + `,`, + `Since:` + fmt.Sprintf("%v", this.Since) + `,`, + `ModificationTag:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1), `&`, ``, 1) + `,`, + `Presence:` + fmt.Sprintf("%v", this.Presence) + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInfo_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInfo_Routable{`, + `Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceChangedEvent{`, + `ActualLRPKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPKey), "ActualLRPKey", "ActualLRPKey", 1), `&`, ``, 1) + `,`, + `ActualLRPInstanceKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPInstanceKey), "ActualLRPInstanceKey", 
"ActualLRPInstanceKey", 1), `&`, ``, 1) + `,`, + `Before:` + strings.Replace(this.Before.String(), "ActualLRPInfo", "ActualLRPInfo", 1) + `,`, + `After:` + strings.Replace(this.After.String(), "ActualLRPInfo", "ActualLRPInfo", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceRemovedEvent{`, + `ActualLrp:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrp), "ActualLRP", "ActualLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPCreatedEvent{`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPChangedEvent{`, + `Before:` + strings.Replace(fmt.Sprintf("%v", this.Before), "DesiredLRP", "DesiredLRP", 1) + `,`, + `After:` + strings.Replace(fmt.Sprintf("%v", this.After), "DesiredLRP", "DesiredLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPRemovedEvent{`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPCrashedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPCrashedEvent{`, + `ActualLRPKey:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPKey), "ActualLRPKey", "ActualLRPKey", 1), `&`, ``, 1) + `,`, + `ActualLRPInstanceKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1), `&`, ``, 1) + `,`, + `CrashCount:` + fmt.Sprintf("%v", this.CrashCount) + `,`, + `CrashReason:` + fmt.Sprintf("%v", this.CrashReason) + `,`, + `Since:` + fmt.Sprintf("%v", this.Since) + `,`, + `}`, + }, "") + return s +} +func (this *EventsByCellId) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventsByCellId{`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *TaskCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCreatedEvent{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskChangedEvent{`, + `Before:` + strings.Replace(fmt.Sprintf("%v", this.Before), "Task", "Task", 1) + `,`, + `After:` + strings.Replace(fmt.Sprintf("%v", this.After), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskRemovedEvent{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringEvents(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ActualLRPCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpGroup == nil { + m.ActualLrpGroup = &ActualLRPGroup{} + } + if err := m.ActualLrpGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &ActualLRPGroup{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &ActualLRPGroup{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpGroup == nil { + m.ActualLrpGroup = &ActualLRPGroup{} + } + if err := m.ActualLrpGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrp == nil { + m.ActualLrp = &ActualLRP{} + } + if err := m.ActualLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashCount", wireType) + } + m.CrashCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.CrashCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrashReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Presence", wireType) + } + m.Presence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Presence |= ActualLRP_Presence(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable 
= &ActualLRPInfo_Routable{b} + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &ActualLRPInfo{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &ActualLRPInfo{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrp == nil { + m.ActualLrp = &ActualLRP{} + } + if err := m.ActualLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { 
+ return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &DesiredLRP{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
+ iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &DesiredLRP{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPCrashedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPCrashedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPCrashedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashCount", wireType) + } + m.CrashCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CrashCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrashReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventsByCellId) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventsByCellId: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventsByCellId: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: TaskChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &Task{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &Task{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*TaskRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/events.proto b/vendor/code.cloudfoundry.org/bbs/models/events.proto new file mode 100644 index 00000000..c6bf8ecc --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/events.proto @@ -0,0 +1,99 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actual_lrp.proto"; +import "desired_lrp.proto"; +import "task.proto"; +import "modification_tag.proto"; + +message ActualLRPCreatedEvent { + option deprecated = true; + ActualLRPGroup actual_lrp_group = 1; +} + +message ActualLRPChangedEvent { + option deprecated = true; + ActualLRPGroup 
before = 1; + ActualLRPGroup after = 2; +} + +message ActualLRPRemovedEvent { + option deprecated = true; + ActualLRPGroup actual_lrp_group = 1; +} + +message ActualLRPInstanceCreatedEvent { + ActualLRP actual_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message ActualLRPInfo { + ActualLRPNetInfo actual_lrp_net_info = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + int32 crash_count = 4 [(gogoproto.jsontag) = "crash_count"]; + string crash_reason = 5; + string state = 6 [(gogoproto.jsontag) = "state"]; + string placement_error = 7; + int64 since = 8 [(gogoproto.jsontag) = "since"]; + ModificationTag modification_tag = 9 [(gogoproto.nullable) = false,(gogoproto.jsontag) = "modification_tag"]; + ActualLRP.Presence presence = 10 [(gogoproto.jsontag) = "presence"]; + oneof optional_routable { + bool Routable = 11; + } + string availability_zone = 12 [(gogoproto.jsontag) = "availability_zone"]; +} + +message ActualLRPInstanceChangedEvent { + ActualLRPKey actual_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInstanceKey actual_lrp_instance_key = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInfo before = 3; + ActualLRPInfo after = 4; + string trace_id = 5 [(gogoproto.jsontag) = "trace_id"]; +} + +message ActualLRPInstanceRemovedEvent { + ActualLRP actual_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message DesiredLRPCreatedEvent { + DesiredLRP desired_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message DesiredLRPChangedEvent { + DesiredLRP before = 1; + DesiredLRP after = 2; + string trace_id = 3 [(gogoproto.jsontag) = "trace_id"]; +} + +message DesiredLRPRemovedEvent { + DesiredLRP desired_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message ActualLRPCrashedEvent { + ActualLRPKey actual_lrp_key = 1 
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInstanceKey actual_lrp_instance_key = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + int32 crash_count = 3 [(gogoproto.jsontag) = "crash_count"]; + string crash_reason = 4; + int64 since = 5 [(gogoproto.jsontag) = "since"]; +} + +message EventsByCellId { + string cell_id = 1 [(gogoproto.jsontag) = "cell_id"]; +} + +message TaskCreatedEvent { + Task task = 1; +} + +message TaskChangedEvent { + Task before = 1; + Task after = 2; +} + +message TaskRemovedEvent { + Task task = 1; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/file.pb.go b/vendor/code.cloudfoundry.org/bbs/models/file.pb.go new file mode 100644 index 00000000..e93426fc --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/file.pb.go @@ -0,0 +1,434 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: file.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type File struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path"` + Content string `protobuf:"bytes,2,opt,name=content,proto3" json:"content"` +} + +func (m *File) Reset() { *m = File{} } +func (*File) ProtoMessage() {} +func (*File) Descriptor() ([]byte, []int) { + return fileDescriptor_9188e3b7e55e1162, []int{0} +} +func (m *File) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_File.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *File) XXX_Merge(src proto.Message) { + xxx_messageInfo_File.Merge(m, src) +} +func (m *File) XXX_Size() int { + return m.Size() +} +func (m *File) XXX_DiscardUnknown() { + xxx_messageInfo_File.DiscardUnknown(m) +} + +var xxx_messageInfo_File proto.InternalMessageInfo + +func (m *File) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *File) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func init() { + proto.RegisterType((*File)(nil), "models.File") +} + +func init() { proto.RegisterFile("file.proto", fileDescriptor_9188e3b7e55e1162) } + +var fileDescriptor_9188e3b7e55e1162 = []byte{ + // 192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0xcb, 0xcc, 0x49, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, + 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0xe4, 0xcd, 0xc5, + 0xe2, 0x96, 0x99, 0x93, 0x2a, 0x24, 0xc3, 0xc5, 0x52, 0x90, 0x58, 0x92, 0x21, 0xc1, 0xa8, 0xc0, + 0xa8, 
0xc1, 0xe9, 0xc4, 0xf1, 0xea, 0x9e, 0x3c, 0x98, 0x1f, 0x04, 0x26, 0x85, 0x54, 0xb9, 0xd8, + 0x93, 0xf3, 0xf3, 0x4a, 0x52, 0xf3, 0x4a, 0x24, 0x98, 0xc0, 0x0a, 0xb8, 0x5f, 0xdd, 0x93, 0x87, + 0x09, 0x05, 0xc1, 0x18, 0x4e, 0x26, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, + 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, + 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, + 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, + 0x2e, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcd, 0x8f, 0x30, 0x8c, 0xce, 0x00, 0x00, 0x00, +} + +func (this *File) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*File) + if !ok { + that2, ok := that.(File) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if this.Content != that1.Content { + return false + } + return true +} +func (this *File) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.File{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Content: "+fmt.Sprintf("%#v", this.Content)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFile(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *File) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *File) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *File) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Content) > 0 { + i -= len(m.Content) + copy(dAtA[i:], m.Content) + i = encodeVarintFile(dAtA, i, uint64(len(m.Content))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintFile(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintFile(dAtA []byte, offset int, v uint64) int { + offset -= sovFile(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *File) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovFile(uint64(l)) + } + l = len(m.Content) + if l > 0 { + n += 1 + l + sovFile(uint64(l)) + } + return n +} + +func sovFile(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFile(x uint64) (n int) { + return sovFile(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&File{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Content:` + fmt.Sprintf("%v", this.Content) + `,`, + `}`, + }, "") + return s +} +func valueToStringFile(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *File) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFile + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: File: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: File: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFile + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFile + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFile + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFile + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFile + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFile + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Content = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFile(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFile + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFile(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + 
for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFile + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFile + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFile + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFile + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFile + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFile + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFile = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFile = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFile = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/file.proto b/vendor/code.cloudfoundry.org/bbs/models/file.proto new file mode 100644 index 00000000..9f314e63 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/file.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message File { + string path = 1 [(gogoproto.jsontag) = "path"]; + string content = 2 [(gogoproto.jsontag) = "content"]; +} \ No newline at end of file diff --git 
a/vendor/code.cloudfoundry.org/bbs/models/image_layer.go b/vendor/code.cloudfoundry.org/bbs/models/image_layer.go new file mode 100644 index 00000000..6a62e561 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/image_layer.go @@ -0,0 +1,228 @@ +package models + +import ( + "encoding/json" + "fmt" + strings "strings" +) + +func (l *ImageLayer) Validate() error { + var validationError ValidationError + + if l.GetUrl() == "" { + validationError = validationError.Append(ErrInvalidField{"url"}) + } + + if l.GetDestinationPath() == "" { + validationError = validationError.Append(ErrInvalidField{"destination_path"}) + } + + if !l.LayerType.Valid() { + validationError = validationError.Append(ErrInvalidField{"layer_type"}) + } + + if !l.MediaType.Valid() { + validationError = validationError.Append(ErrInvalidField{"media_type"}) + } + + if (l.DigestValue != "" || l.LayerType == LayerTypeExclusive) && l.DigestAlgorithm == DigestAlgorithmInvalid { + validationError = validationError.Append(ErrInvalidField{"digest_algorithm"}) + } + + if (l.DigestAlgorithm != DigestAlgorithmInvalid || l.LayerType == LayerTypeExclusive) && l.DigestValue == "" { + validationError = validationError.Append(ErrInvalidField{"digest_value"}) + } + + if l.DigestValue != "" && !l.DigestAlgorithm.Valid() { + validationError = validationError.Append(ErrInvalidField{"digest_algorithm"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func validateImageLayers(layers []*ImageLayer, legacyDownloadUser string) ValidationError { + var validationError ValidationError + + requiresLegacyDownloadUser := false + if len(layers) > 0 { + for _, layer := range layers { + err := layer.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"image_layer"}) + validationError = validationError.Append(err) + } + + if layer.LayerType == LayerTypeExclusive { + requiresLegacyDownloadUser = true + } + } + } + + if requiresLegacyDownloadUser && 
legacyDownloadUser == "" { + validationError = validationError.Append(ErrInvalidField{"legacy_download_user"}) + } + + return validationError +} + +type ImageLayers []*ImageLayer + +func (layers ImageLayers) FilterByType(layerType ImageLayer_Type) ImageLayers { + var filtered ImageLayers + + for _, layer := range layers { + if layer.GetLayerType() == layerType { + filtered = append(filtered, layer) + } + } + return filtered +} + +func (layers ImageLayers) ToDownloadActions(legacyDownloadUser string, existingAction *Action) *Action { + downloadActions := []ActionInterface{} + + for _, layer := range layers.FilterByType(LayerTypeExclusive) { + digestAlgorithmName := strings.ToLower(layer.DigestAlgorithm.String()) + downloadActions = append(downloadActions, &DownloadAction{ + Artifact: layer.Name, + From: layer.Url, + To: layer.DestinationPath, + CacheKey: digestAlgorithmName + ":" + layer.DigestValue, // digest required for exclusive layers + User: legacyDownloadUser, + ChecksumAlgorithm: digestAlgorithmName, + ChecksumValue: layer.DigestValue, + }) + } + + var action *Action + if len(downloadActions) > 0 { + parallelDownloadActions := Parallel(downloadActions...) 
+ if existingAction != nil { + action = WrapAction(Serial(parallelDownloadActions, UnwrapAction(existingAction))) + } else { + action = WrapAction(parallelDownloadActions) + } + } else { + action = existingAction + } + + return action +} + +func (layers ImageLayers) ToCachedDependencies() []*CachedDependency { + cachedDependencies := []*CachedDependency{} + for _, layer := range layers.FilterByType(LayerTypeShared) { + c := &CachedDependency{ + Name: layer.Name, + From: layer.Url, + To: layer.DestinationPath, + ChecksumValue: layer.DigestValue, + } + + if layer.DigestAlgorithm == DigestAlgorithmInvalid { + c.ChecksumAlgorithm = "" + } else { + c.ChecksumAlgorithm = strings.ToLower(layer.DigestAlgorithm.String()) + } + + if layer.DigestValue == "" { + c.CacheKey = layer.Url + } else { + c.CacheKey = c.ChecksumAlgorithm + ":" + layer.DigestValue + } + + cachedDependencies = append(cachedDependencies, c) + } + + return cachedDependencies +} + +func (d ImageLayer_DigestAlgorithm) Valid() bool { + switch d { + case DigestAlgorithmSha256: + return true + case DigestAlgorithmSha512: + return true + default: + return false + } +} + +func (m ImageLayer_MediaType) Valid() bool { + switch m { + case MediaTypeTar: + return true + case MediaTypeTgz: + return true + case MediaTypeZip: + return true + default: + return false + } +} + +func (t ImageLayer_Type) Valid() bool { + switch t { + case LayerTypeExclusive: + return true + case LayerTypeShared: + return true + default: + return false + } +} + +func (d *ImageLayer_DigestAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ImageLayer_DigestAlgorithm_value[name]; found { + *d = ImageLayer_DigestAlgorithm(v) + return nil + } + return fmt.Errorf("invalid digest_algorithm: %s", name) +} + +func (d ImageLayer_DigestAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +func (m *ImageLayer_MediaType) 
UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ImageLayer_MediaType_value[name]; found { + *m = ImageLayer_MediaType(v) + return nil + } + return fmt.Errorf("invalid media_type: %s", name) +} + +func (m ImageLayer_MediaType) MarshalJSON() ([]byte, error) { + return json.Marshal(m.String()) +} + +func (t *ImageLayer_Type) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ImageLayer_Type_value[name]; found { + *t = ImageLayer_Type(v) + return nil + } + return fmt.Errorf("invalid type: %s", name) +} + +func (t ImageLayer_Type) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go b/vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go new file mode 100644 index 00000000..8dda5974 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go @@ -0,0 +1,788 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: image_layer.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ImageLayer_DigestAlgorithm int32 + +const ( + DigestAlgorithmInvalid ImageLayer_DigestAlgorithm = 0 + DigestAlgorithmSha256 ImageLayer_DigestAlgorithm = 1 + DigestAlgorithmSha512 ImageLayer_DigestAlgorithm = 2 // Deprecated: Do not use. +) + +var ImageLayer_DigestAlgorithm_name = map[int32]string{ + 0: "DigestAlgorithmInvalid", + 1: "SHA256", + 2: "SHA512", +} + +var ImageLayer_DigestAlgorithm_value = map[string]int32{ + "DigestAlgorithmInvalid": 0, + "SHA256": 1, + "SHA512": 2, +} + +func (ImageLayer_DigestAlgorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0, 0} +} + +type ImageLayer_MediaType int32 + +const ( + MediaTypeInvalid ImageLayer_MediaType = 0 + MediaTypeTgz ImageLayer_MediaType = 1 + MediaTypeTar ImageLayer_MediaType = 2 + MediaTypeZip ImageLayer_MediaType = 3 +) + +var ImageLayer_MediaType_name = map[int32]string{ + 0: "MediaTypeInvalid", + 1: "TGZ", + 2: "TAR", + 3: "ZIP", +} + +var ImageLayer_MediaType_value = map[string]int32{ + "MediaTypeInvalid": 0, + "TGZ": 1, + "TAR": 2, + "ZIP": 3, +} + +func (ImageLayer_MediaType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0, 1} +} + +type ImageLayer_Type int32 + +const ( + LayerTypeInvalid ImageLayer_Type = 0 + LayerTypeShared ImageLayer_Type = 1 + LayerTypeExclusive ImageLayer_Type = 2 +) + +var ImageLayer_Type_name = map[int32]string{ + 0: "LayerTypeInvalid", + 1: "SHARED", + 2: "EXCLUSIVE", +} + +var ImageLayer_Type_value = map[string]int32{ + "LayerTypeInvalid": 0, + "SHARED": 1, + "EXCLUSIVE": 2, +} + +func (ImageLayer_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0, 2} +} + +type ImageLayer struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url"` + DestinationPath string 
`protobuf:"bytes,3,opt,name=destination_path,json=destinationPath,proto3" json:"destination_path"` + LayerType ImageLayer_Type `protobuf:"varint,4,opt,name=layer_type,json=layerType,proto3,enum=models.ImageLayer_Type" json:"layer_type"` + MediaType ImageLayer_MediaType `protobuf:"varint,5,opt,name=media_type,json=mediaType,proto3,enum=models.ImageLayer_MediaType" json:"media_type"` + DigestAlgorithm ImageLayer_DigestAlgorithm `protobuf:"varint,6,opt,name=digest_algorithm,json=digestAlgorithm,proto3,enum=models.ImageLayer_DigestAlgorithm" json:"digest_algorithm,omitempty"` + DigestValue string `protobuf:"bytes,7,opt,name=digest_value,json=digestValue,proto3" json:"digest_value,omitempty"` +} + +func (m *ImageLayer) Reset() { *m = ImageLayer{} } +func (*ImageLayer) ProtoMessage() {} +func (*ImageLayer) Descriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0} +} +func (m *ImageLayer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageLayer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageLayer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayer.Merge(m, src) +} +func (m *ImageLayer) XXX_Size() int { + return m.Size() +} +func (m *ImageLayer) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayer.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayer proto.InternalMessageInfo + +func (m *ImageLayer) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ImageLayer) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ImageLayer) GetDestinationPath() string { + if m != nil { + return m.DestinationPath + } + return "" +} + +func (m *ImageLayer) GetLayerType() ImageLayer_Type { + if m != nil { + return m.LayerType + } + 
return LayerTypeInvalid +} + +func (m *ImageLayer) GetMediaType() ImageLayer_MediaType { + if m != nil { + return m.MediaType + } + return MediaTypeInvalid +} + +func (m *ImageLayer) GetDigestAlgorithm() ImageLayer_DigestAlgorithm { + if m != nil { + return m.DigestAlgorithm + } + return DigestAlgorithmInvalid +} + +func (m *ImageLayer) GetDigestValue() string { + if m != nil { + return m.DigestValue + } + return "" +} + +func init() { + proto.RegisterEnum("models.ImageLayer_DigestAlgorithm", ImageLayer_DigestAlgorithm_name, ImageLayer_DigestAlgorithm_value) + proto.RegisterEnum("models.ImageLayer_MediaType", ImageLayer_MediaType_name, ImageLayer_MediaType_value) + proto.RegisterEnum("models.ImageLayer_Type", ImageLayer_Type_name, ImageLayer_Type_value) + proto.RegisterType((*ImageLayer)(nil), "models.ImageLayer") +} + +func init() { proto.RegisterFile("image_layer.proto", fileDescriptor_c089288d9f3c46a0) } + +var fileDescriptor_c089288d9f3c46a0 = []byte{ + // 533 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0x41, 0x6f, 0x12, 0x41, + 0x14, 0xc7, 0x77, 0xa0, 0xa5, 0xf2, 0x6c, 0xca, 0x38, 0xd6, 0xba, 0xac, 0x66, 0x58, 0x49, 0x1a, + 0x7b, 0x91, 0xa6, 0x28, 0xbd, 0x1a, 0xb0, 0xa8, 0x44, 0x9a, 0x34, 0x0b, 0x36, 0x86, 0x0b, 0x19, + 0xba, 0xe3, 0xee, 0x26, 0xbb, 0x2c, 0x59, 0x16, 0x22, 0x26, 0x26, 0x9e, 0x39, 0xf9, 0x05, 0xb8, + 0xfb, 0x51, 0x3c, 0x72, 0xec, 0xc1, 0x10, 0x59, 0x2e, 0x86, 0x53, 0x3f, 0x82, 0xd9, 0x41, 0xa0, + 0xae, 0x5c, 0x36, 0xef, 0xfd, 0xff, 0xbf, 0xf9, 0xbf, 0x99, 0xd9, 0x0c, 0xdc, 0xb3, 0x1c, 0x66, + 0xf0, 0xa6, 0xcd, 0x06, 0xdc, 0xcb, 0x75, 0x3c, 0xd7, 0x77, 0x49, 0xc2, 0x71, 0x75, 0x6e, 0x77, + 0x95, 0x67, 0x86, 0xe5, 0x9b, 0xbd, 0x56, 0xee, 0xca, 0x75, 0x8e, 0x0d, 0xd7, 0x70, 0x8f, 0x85, + 0xdd, 0xea, 0x7d, 0x14, 0x9d, 0x68, 0x44, 0xb5, 0x58, 0x96, 0xfd, 0xb9, 0x0d, 0x50, 0x09, 0xc3, + 0xaa, 0x61, 0x16, 0x21, 0xb0, 0xd5, 0x66, 0x0e, 0x97, 0x91, 0x8a, 0x8e, 0x92, 0x9a, 0xa8, 0x49, + 
0x1a, 0xe2, 0x3d, 0xcf, 0x96, 0x63, 0xa1, 0x54, 0xda, 0x99, 0x4f, 0x32, 0x61, 0xab, 0x85, 0x1f, + 0xf2, 0x12, 0xb0, 0xce, 0xbb, 0xbe, 0xd5, 0x66, 0xbe, 0xe5, 0xb6, 0x9b, 0x1d, 0xe6, 0x9b, 0x72, + 0x5c, 0x70, 0xfb, 0xf3, 0x49, 0xe6, 0x3f, 0x4f, 0x4b, 0xdd, 0x52, 0x2e, 0x98, 0x6f, 0x92, 0xd7, + 0x00, 0xe2, 0x10, 0x4d, 0x7f, 0xd0, 0xe1, 0xf2, 0x96, 0x8a, 0x8e, 0xf6, 0xf2, 0x0f, 0x73, 0x8b, + 0xa3, 0xe4, 0xd6, 0xfb, 0xca, 0xd5, 0x07, 0x1d, 0x5e, 0xda, 0x9b, 0x4f, 0x32, 0xb7, 0x70, 0x2d, + 0x29, 0xea, 0xd0, 0x22, 0xef, 0x00, 0x1c, 0xae, 0x5b, 0x6c, 0x91, 0xb3, 0x2d, 0x72, 0x1e, 0x6f, + 0xc8, 0x39, 0x0f, 0xa1, 0x75, 0xd8, 0x7a, 0x8d, 0x96, 0x74, 0x96, 0x16, 0x39, 0x07, 0xac, 0x5b, + 0x06, 0xef, 0xfa, 0x4d, 0x66, 0x1b, 0xae, 0x67, 0xf9, 0xa6, 0x23, 0x27, 0x44, 0x64, 0x76, 0x43, + 0xe4, 0x99, 0x40, 0x8b, 0x4b, 0x52, 0x4b, 0xe9, 0xff, 0x0a, 0xe4, 0x09, 0xec, 0xfe, 0x8d, 0xeb, + 0x33, 0xbb, 0xc7, 0xe5, 0x1d, 0x71, 0xb7, 0x77, 0x17, 0xda, 0x65, 0x28, 0x65, 0xbf, 0x40, 0x2a, + 0x12, 0x43, 0x14, 0x38, 0x88, 0x48, 0x95, 0x76, 0x9f, 0xd9, 0x96, 0x8e, 0x25, 0x72, 0x08, 0x89, + 0xda, 0xdb, 0x62, 0xbe, 0x70, 0x8a, 0x91, 0x92, 0x1e, 0x8e, 0xd4, 0x07, 0x11, 0xb2, 0x66, 0xb2, + 0x7c, 0xe1, 0x94, 0x3c, 0x15, 0x58, 0xe1, 0x24, 0x8f, 0x63, 0xca, 0xa3, 0xcd, 0x58, 0xe1, 0x24, + 0x7f, 0x07, 0x65, 0x3d, 0x48, 0xae, 0x2e, 0x86, 0xec, 0x03, 0x5e, 0x35, 0xeb, 0x91, 0x69, 0x88, + 0xd7, 0xdf, 0x34, 0x30, 0x52, 0xf0, 0x70, 0xa4, 0xee, 0xae, 0x80, 0xba, 0xf1, 0x59, 0x58, 0x45, + 0x0d, 0xc7, 0xa2, 0x16, 0xf3, 0x42, 0xab, 0x51, 0xb9, 0xc0, 0xf1, 0x88, 0xd5, 0xb0, 0x3a, 0x59, + 0x1d, 0xb6, 0x96, 0xe3, 0xaa, 0xcb, 0xdf, 0xb8, 0x1e, 0x97, 0x11, 0x5b, 0xd7, 0xca, 0x67, 0x18, + 0x29, 0xf7, 0x87, 0x23, 0x35, 0xb5, 0x62, 0x6a, 0x26, 0xf3, 0xb8, 0x4e, 0x0e, 0x21, 0x59, 0xfe, + 0xf0, 0xaa, 0xfa, 0xbe, 0x56, 0xb9, 0x2c, 0xe3, 0x98, 0x72, 0x30, 0x1c, 0xa9, 0x64, 0xc5, 0x94, + 0x3f, 0x5d, 0xd9, 0xbd, 0xae, 0xd5, 0xe7, 0xa5, 0x17, 0xe3, 0x29, 0x95, 0xae, 0xa7, 0x54, 0xba, + 0x99, 0x52, 0xf4, 0x35, 0xa0, 0xe8, 
0x7b, 0x40, 0xd1, 0x8f, 0x80, 0xa2, 0x71, 0x40, 0xd1, 0xaf, + 0x80, 0xa2, 0xdf, 0x01, 0x95, 0x6e, 0x02, 0x8a, 0xbe, 0xcd, 0xa8, 0x34, 0x9e, 0x51, 0xe9, 0x7a, + 0x46, 0xa5, 0x56, 0x42, 0xbc, 0x8d, 0xe7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x90, 0xc9, + 0x42, 0x67, 0x03, 0x00, 0x00, +} + +func (x ImageLayer_DigestAlgorithm) String() string { + s, ok := ImageLayer_DigestAlgorithm_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ImageLayer_MediaType) String() string { + s, ok := ImageLayer_MediaType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ImageLayer_Type) String() string { + s, ok := ImageLayer_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *ImageLayer) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ImageLayer) + if !ok { + that2, ok := that.(ImageLayer) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Url != that1.Url { + return false + } + if this.DestinationPath != that1.DestinationPath { + return false + } + if this.LayerType != that1.LayerType { + return false + } + if this.MediaType != that1.MediaType { + return false + } + if this.DigestAlgorithm != that1.DigestAlgorithm { + return false + } + if this.DigestValue != that1.DigestValue { + return false + } + return true +} +func (this *ImageLayer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.ImageLayer{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Url: "+fmt.Sprintf("%#v", this.Url)+",\n") + s = append(s, "DestinationPath: "+fmt.Sprintf("%#v", this.DestinationPath)+",\n") + s = append(s, "LayerType: "+fmt.Sprintf("%#v", this.LayerType)+",\n") + s = append(s, "MediaType: 
"+fmt.Sprintf("%#v", this.MediaType)+",\n") + s = append(s, "DigestAlgorithm: "+fmt.Sprintf("%#v", this.DigestAlgorithm)+",\n") + s = append(s, "DigestValue: "+fmt.Sprintf("%#v", this.DigestValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringImageLayer(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ImageLayer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DigestValue) > 0 { + i -= len(m.DigestValue) + copy(dAtA[i:], m.DigestValue) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.DigestValue))) + i-- + dAtA[i] = 0x3a + } + if m.DigestAlgorithm != 0 { + i = encodeVarintImageLayer(dAtA, i, uint64(m.DigestAlgorithm)) + i-- + dAtA[i] = 0x30 + } + if m.MediaType != 0 { + i = encodeVarintImageLayer(dAtA, i, uint64(m.MediaType)) + i-- + dAtA[i] = 0x28 + } + if m.LayerType != 0 { + i = encodeVarintImageLayer(dAtA, i, uint64(m.LayerType)) + i-- + dAtA[i] = 0x20 + } + if len(m.DestinationPath) > 0 { + i -= len(m.DestinationPath) + copy(dAtA[i:], m.DestinationPath) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.DestinationPath))) + i-- + dAtA[i] = 0x1a + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + 
return len(dAtA) - i, nil +} + +func encodeVarintImageLayer(dAtA []byte, offset int, v uint64) int { + offset -= sovImageLayer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ImageLayer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + l = len(m.DestinationPath) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + if m.LayerType != 0 { + n += 1 + sovImageLayer(uint64(m.LayerType)) + } + if m.MediaType != 0 { + n += 1 + sovImageLayer(uint64(m.MediaType)) + } + if m.DigestAlgorithm != 0 { + n += 1 + sovImageLayer(uint64(m.DigestAlgorithm)) + } + l = len(m.DigestValue) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + return n +} + +func sovImageLayer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozImageLayer(x uint64) (n int) { + return sovImageLayer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageLayer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Url:` + fmt.Sprintf("%v", this.Url) + `,`, + `DestinationPath:` + fmt.Sprintf("%v", this.DestinationPath) + `,`, + `LayerType:` + fmt.Sprintf("%v", this.LayerType) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `DigestAlgorithm:` + fmt.Sprintf("%v", this.DigestAlgorithm) + `,`, + `DigestValue:` + fmt.Sprintf("%v", this.DigestValue) + `,`, + `}`, + }, "") + return s +} +func valueToStringImageLayer(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageLayer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field DestinationPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerType", wireType) + } + m.LayerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerType |= ImageLayer_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + m.MediaType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MediaType |= ImageLayer_MediaType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DigestAlgorithm", wireType) + } + m.DigestAlgorithm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DigestAlgorithm |= ImageLayer_DigestAlgorithm(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DigestValue", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DigestValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipImageLayer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthImageLayer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipImageLayer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImageLayer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImageLayer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImageLayer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthImageLayer + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + 
return 0, ErrUnexpectedEndOfGroupImageLayer + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthImageLayer + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthImageLayer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowImageLayer = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupImageLayer = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/image_layer.proto b/vendor/code.cloudfoundry.org/bbs/models/image_layer.proto new file mode 100644 index 00000000..07b64913 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/image_layer.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message ImageLayer { + enum DigestAlgorithm { + DigestAlgorithmInvalid = 0; // not camel cased since it isn't supposed to be used by API users + SHA256 = 1 [(gogoproto.enumvalue_customname) = "DigestAlgorithmSha256"]; + SHA512 = 2 [(gogoproto.enumvalue_customname) = "DigestAlgorithmSha512", deprecated=true]; + } + + enum MediaType { + MediaTypeInvalid = 0; // not camel cased since it isn't supposed to be used by API users + TGZ = 1 [(gogoproto.enumvalue_customname) = "MediaTypeTgz"]; + TAR = 2 [(gogoproto.enumvalue_customname) = "MediaTypeTar"]; + ZIP = 3 [(gogoproto.enumvalue_customname) = "MediaTypeZip"]; + } + + enum Type { + LayerTypeInvalid = 0; // not camel cased since it isn't supposed to be used by API users + SHARED = 1 [(gogoproto.enumvalue_customname) = "LayerTypeShared"]; + EXCLUSIVE = 2 [(gogoproto.enumvalue_customname) = "LayerTypeExclusive"]; + } + + string name = 1; + string url = 2 [(gogoproto.jsontag) = "url"]; + string destination_path = 3 [(gogoproto.jsontag) = "destination_path"]; + Type layer_type = 4 [(gogoproto.jsontag) = 
"layer_type"]; + MediaType media_type = 5 [(gogoproto.jsontag) = "media_type"]; + DigestAlgorithm digest_algorithm = 6; + string digest_value = 7; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/json.go b/vendor/code.cloudfoundry.org/bbs/models/json.go new file mode 100644 index 00000000..208d41bf --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/json.go @@ -0,0 +1,54 @@ +package models + +import ( + "encoding/json" + "reflect" +) + +func FromJSON(payload []byte, v Validator) error { + err := json.Unmarshal(payload, v) + if err != nil { + return err + } + return v.Validate() +} + +func ToJSON(v Validator) ([]byte, error) { + if isNil(v) { + return json.Marshal(v) + } + + if err := v.Validate(); err != nil { + return []byte{}, err + } + + return json.Marshal(v) +} + +func ToJSONArray(vs ...Validator) ([]byte, error) { + msgs := make([]*json.RawMessage, len(vs)) + + for i, v := range vs { + msg, err := ToJSON(v) + if err != nil { + return nil, err + } + + msgs[i] = (*json.RawMessage)(&msg) + } + + return json.Marshal(msgs) +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go new file mode 100644 index 00000000..0ccdf6db --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go @@ -0,0 +1,360 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: log_rate_limit.proto + +package models + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type LogRateLimit struct { + BytesPerSecond int64 `protobuf:"varint,1,opt,name=bytes_per_second,json=bytesPerSecond,proto3" json:"bytes_per_second,omitempty"` +} + +func (m *LogRateLimit) Reset() { *m = LogRateLimit{} } +func (*LogRateLimit) ProtoMessage() {} +func (*LogRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_bfeb7b5141d983ba, []int{0} +} +func (m *LogRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LogRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LogRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRateLimit.Merge(m, src) +} +func (m *LogRateLimit) XXX_Size() int { + return m.Size() +} +func (m *LogRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_LogRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRateLimit proto.InternalMessageInfo + +func (m *LogRateLimit) GetBytesPerSecond() int64 { + if m != nil { + return m.BytesPerSecond + } + return 0 +} + +func init() { + proto.RegisterType((*LogRateLimit)(nil), "models.LogRateLimit") +} + +func init() { proto.RegisterFile("log_rate_limit.proto", fileDescriptor_bfeb7b5141d983ba) } + +var fileDescriptor_bfeb7b5141d983ba = []byte{ + // 166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc9, 0xc9, 0x4f, 0x8f, + 0x2f, 0x4a, 0x2c, 0x49, 0x8d, 
0xcf, 0xc9, 0xcc, 0xcd, 0x2c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x56, 0xb2, 0xe0, 0xe2, 0xf1, 0xc9, 0x4f, 0x0f, + 0x4a, 0x2c, 0x49, 0xf5, 0x01, 0xc9, 0x0a, 0x69, 0x70, 0x09, 0x24, 0x55, 0x96, 0xa4, 0x16, 0xc7, + 0x17, 0xa4, 0x16, 0xc5, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, + 0x07, 0xf1, 0x81, 0xc5, 0x03, 0x52, 0x8b, 0x82, 0xc1, 0xa2, 0x4e, 0x26, 0x17, 0x1e, 0xca, 0x31, + 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, + 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, + 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, + 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0xd6, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xba, 0xbc, + 0x7d, 0xdf, 0x96, 0x00, 0x00, 0x00, +} + +func (this *LogRateLimit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LogRateLimit) + if !ok { + that2, ok := that.(LogRateLimit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BytesPerSecond != that1.BytesPerSecond { + return false + } + return true +} +func (this *LogRateLimit) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.LogRateLimit{") + s = append(s, "BytesPerSecond: "+fmt.Sprintf("%#v", this.BytesPerSecond)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringLogRateLimit(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *LogRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BytesPerSecond != 0 { + i = encodeVarintLogRateLimit(dAtA, i, uint64(m.BytesPerSecond)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintLogRateLimit(dAtA []byte, offset int, v uint64) int { + offset -= sovLogRateLimit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LogRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesPerSecond != 0 { + n += 1 + sovLogRateLimit(uint64(m.BytesPerSecond)) + } + return n +} + +func sovLogRateLimit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLogRateLimit(x uint64) (n int) { + return sovLogRateLimit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogRateLimit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogRateLimit{`, + `BytesPerSecond:` + fmt.Sprintf("%v", this.BytesPerSecond) + `,`, + `}`, + }, "") + return s +} +func valueToStringLogRateLimit(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesPerSecond", wireType) + } + m.BytesPerSecond = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesPerSecond |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogRateLimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLogRateLimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogRateLimit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, 
ErrInvalidLengthLogRateLimit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLogRateLimit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLogRateLimit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLogRateLimit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogRateLimit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLogRateLimit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto new file mode 100644 index 00000000..7248103b --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package models; + +message LogRateLimit { + int64 bytes_per_second = 1; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go b/vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go new file mode 100644 index 00000000..fda65b52 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go @@ -0,0 +1,6 @@ +package models + +type ActualLRPKeyWithSchedulingInfo struct { + Key *ActualLRPKey + SchedulingInfo *DesiredLRPSchedulingInfo +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/metric_tags.go b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.go new file mode 100644 index 00000000..2c1e246b --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.go @@ -0,0 +1,101 @@ +package models + +import ( + "encoding/json" + "fmt" + "strconv" +) + +func (m *MetricTagValue) Validate() error { + var validationError ValidationError + + if m.Static != "" && m.Dynamic.Valid() { + validationError = validationError.Append(ErrInvalidField{"static"}) + validationError 
= validationError.Append(ErrInvalidField{"dynamic"}) + } + + if m.Static == "" && !m.Dynamic.Valid() { + validationError = validationError.Append(ErrInvalidField{"static"}) + validationError = validationError.Append(ErrInvalidField{"dynamic"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (v MetricTagValue_DynamicValue) Valid() bool { + switch v { + case MetricTagDynamicValueIndex: + return true + case MetricTagDynamicValueInstanceGuid: + return true + default: + return false + } +} + +func ConvertMetricTags(metricTags map[string]*MetricTagValue, info map[MetricTagValue_DynamicValue]interface{}) (map[string]string, error) { + tags := make(map[string]string) + for k, v := range metricTags { + if v.Dynamic > 0 { + switch v.Dynamic { + case MetricTagDynamicValueIndex: + val, ok := info[MetricTagDynamicValueIndex].(int32) + if !ok { + return nil, fmt.Errorf("could not convert value %+v of type %T to int32", info[MetricTagDynamicValueIndex], info[MetricTagDynamicValueIndex]) + } + tags[k] = strconv.FormatInt(int64(val), 10) + case MetricTagDynamicValueInstanceGuid: + val, ok := info[MetricTagDynamicValueInstanceGuid].(string) + if !ok { + return nil, fmt.Errorf("could not convert value %+v of type %T to string", info[MetricTagDynamicValueInstanceGuid], info[MetricTagDynamicValueInstanceGuid]) + } + tags[k] = val + } + } else { + tags[k] = v.Static + } + } + return tags, nil +} + +func validateMetricTags(m map[string]*MetricTagValue, metricsGuid string) ValidationError { + var validationError ValidationError + + for _, v := range m { + err := v.Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if len(m) > 0 && metricsGuid != "" { + if source_id, ok := m["source_id"]; !ok || source_id.Static != metricsGuid { + validationError = validationError.Append(ErrInvalidField{"source_id should match metrics_guid"}) + } + } + + if !validationError.Empty() { + return validationError + } + + return 
nil +} + +func (v *MetricTagValue_DynamicValue) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + *v = MetricTagValue_DynamicValue(MetricTagValue_DynamicValue_value[name]) + + return nil +} + +func (v MetricTagValue_DynamicValue) MarshalJSON() ([]byte, error) { + return json.Marshal(MetricTagValue_DynamicValue_name[int32(v)]) +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go new file mode 100644 index 00000000..1ab472f0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go @@ -0,0 +1,460 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: metric_tags.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricTagValue_DynamicValue int32 + +const ( + DynamicValueInvalid MetricTagValue_DynamicValue = 0 + MetricTagDynamicValueIndex MetricTagValue_DynamicValue = 1 + MetricTagDynamicValueInstanceGuid MetricTagValue_DynamicValue = 2 +) + +var MetricTagValue_DynamicValue_name = map[int32]string{ + 0: "DynamicValueInvalid", + 1: "INDEX", + 2: "INSTANCE_GUID", +} + +var MetricTagValue_DynamicValue_value = map[string]int32{ + "DynamicValueInvalid": 0, + "INDEX": 1, + "INSTANCE_GUID": 2, +} + +func (MetricTagValue_DynamicValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6fa2ee0541447d5e, []int{0, 0} +} + +type MetricTagValue struct { + // Note: we only expect one of the following set of fields to be + // set. + Static string `protobuf:"bytes,1,opt,name=static,proto3" json:"static,omitempty"` + Dynamic MetricTagValue_DynamicValue `protobuf:"varint,2,opt,name=dynamic,proto3,enum=models.MetricTagValue_DynamicValue" json:"dynamic,omitempty"` +} + +func (m *MetricTagValue) Reset() { *m = MetricTagValue{} } +func (*MetricTagValue) ProtoMessage() {} +func (*MetricTagValue) Descriptor() ([]byte, []int) { + return fileDescriptor_6fa2ee0541447d5e, []int{0} +} +func (m *MetricTagValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTagValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricTagValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricTagValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTagValue.Merge(m, src) +} +func (m *MetricTagValue) XXX_Size() int { + return m.Size() +} +func (m *MetricTagValue) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTagValue.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricTagValue proto.InternalMessageInfo + 
+func (m *MetricTagValue) GetStatic() string { + if m != nil { + return m.Static + } + return "" +} + +func (m *MetricTagValue) GetDynamic() MetricTagValue_DynamicValue { + if m != nil { + return m.Dynamic + } + return DynamicValueInvalid +} + +func init() { + proto.RegisterEnum("models.MetricTagValue_DynamicValue", MetricTagValue_DynamicValue_name, MetricTagValue_DynamicValue_value) + proto.RegisterType((*MetricTagValue)(nil), "models.MetricTagValue") +} + +func init() { proto.RegisterFile("metric_tags.proto", fileDescriptor_6fa2ee0541447d5e) } + +var fileDescriptor_6fa2ee0541447d5e = []byte{ + // 296 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4d, 0x2d, 0x29, + 0xca, 0x4c, 0x8e, 0x2f, 0x49, 0x4c, 0x2f, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, + 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, + 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, + 0x98, 0x05, 0xd1, 0xa6, 0xf4, 0x8d, 0x91, 0x8b, 0xcf, 0x17, 0x6c, 0x58, 0x48, 0x62, 0x7a, 0x58, + 0x62, 0x4e, 0x69, 0xaa, 0x90, 0x18, 0x17, 0x5b, 0x71, 0x49, 0x62, 0x49, 0x66, 0xb2, 0x04, 0xa3, + 0x02, 0xa3, 0x06, 0x67, 0x10, 0x94, 0x27, 0x64, 0xcb, 0xc5, 0x9e, 0x52, 0x99, 0x97, 0x98, 0x9b, + 0x99, 0x2c, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x67, 0xa4, 0xac, 0x07, 0xb1, 0x53, 0x0f, 0xd5, 0x00, + 0x3d, 0x17, 0x88, 0x2a, 0x30, 0x27, 0x08, 0xa6, 0x47, 0xa9, 0x87, 0x91, 0x8b, 0x07, 0x59, 0x46, + 0x48, 0x9c, 0x4b, 0x18, 0x99, 0xef, 0x99, 0x57, 0x96, 0x98, 0x93, 0x99, 0x22, 0xc0, 0x20, 0xa4, + 0xc9, 0xc5, 0xea, 0xe9, 0xe7, 0xe2, 0x1a, 0x21, 0xc0, 0x28, 0x25, 0xd7, 0x35, 0x57, 0x41, 0x0a, + 0x6e, 0x3c, 0xaa, 0xf2, 0x94, 0xd4, 0x0a, 0x21, 0x0b, 0x2e, 0x5e, 0x4f, 0xbf, 0xe0, 0x10, 0x47, + 0x3f, 0x67, 0xd7, 0x78, 0xf7, 0x50, 0x4f, 0x17, 0x01, 0x26, 0x29, 0xd5, 0xae, 0xb9, 0x0a, 0x8a, + 0x38, 0xb4, 0x14, 0x97, 0x24, 0xe6, 0x25, 0xa7, 0xba, 0x97, 0x66, 0xa6, 
0x38, 0x99, 0x5c, 0x78, + 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, + 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, + 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, + 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0x87, 0x9a, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, + 0x9b, 0x83, 0x77, 0xc8, 0x81, 0x01, 0x00, 0x00, +} + +func (x MetricTagValue_DynamicValue) String() string { + s, ok := MetricTagValue_DynamicValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *MetricTagValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricTagValue) + if !ok { + that2, ok := that.(MetricTagValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Static != that1.Static { + return false + } + if this.Dynamic != that1.Dynamic { + return false + } + return true +} +func (this *MetricTagValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.MetricTagValue{") + s = append(s, "Static: "+fmt.Sprintf("%#v", this.Static)+",\n") + s = append(s, "Dynamic: "+fmt.Sprintf("%#v", this.Dynamic)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringMetricTags(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *MetricTagValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricTagValue) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTagValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Dynamic != 0 { + i = encodeVarintMetricTags(dAtA, i, uint64(m.Dynamic)) + i-- + dAtA[i] = 0x10 + } + if len(m.Static) > 0 { + i -= len(m.Static) + copy(dAtA[i:], m.Static) + i = encodeVarintMetricTags(dAtA, i, uint64(len(m.Static))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMetricTags(dAtA []byte, offset int, v uint64) int { + offset -= sovMetricTags(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MetricTagValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Static) + if l > 0 { + n += 1 + l + sovMetricTags(uint64(l)) + } + if m.Dynamic != 0 { + n += 1 + sovMetricTags(uint64(m.Dynamic)) + } + return n +} + +func sovMetricTags(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetricTags(x uint64) (n int) { + return sovMetricTags(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MetricTagValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricTagValue{`, + `Static:` + fmt.Sprintf("%v", this.Static) + `,`, + `Dynamic:` + fmt.Sprintf("%v", this.Dynamic) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetricTags(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MetricTagValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricTags + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricTagValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricTagValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Static", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricTags + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetricTags + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetricTags + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Static = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dynamic", wireType) + } + m.Dynamic = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricTags + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dynamic |= MetricTagValue_DynamicValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetricTags(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetricTags + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetricTags(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricTags + } + 
if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricTags + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricTags + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetricTags + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetricTags + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetricTags + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetricTags = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetricTags = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetricTags = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto new file mode 100644 index 00000000..18293e0f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message MetricTagValue { + enum DynamicValue { + DynamicValueInvalid = 0; + INDEX = 1 [(gogoproto.enumvalue_customname) = "MetricTagDynamicValueIndex"]; + INSTANCE_GUID = 2 [(gogoproto.enumvalue_customname) = 
"MetricTagDynamicValueInstanceGuid"]; + } + + // Note: we only expect one of the following set of fields to be + // set. + string static = 1; + DynamicValue dynamic = 2; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/models.go b/vendor/code.cloudfoundry.org/bbs/models/models.go new file mode 100644 index 00000000..5c8c93ab --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/models.go @@ -0,0 +1,8 @@ +package models + +//go:generate bash ../scripts/generate_protos.sh + +const ( + maximumAnnotationLength = 10 * 1024 + maximumRouteLength = 128 * 1024 +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/modification_tag.go b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.go new file mode 100644 index 00000000..d9d4aaa1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.go @@ -0,0 +1,20 @@ +package models + +func NewModificationTag(epoch string, index uint32) ModificationTag { + return ModificationTag{ + Epoch: epoch, + Index: index, + } +} + +func (t *ModificationTag) Increment() { + t.Index++ +} + +func (m *ModificationTag) SucceededBy(other *ModificationTag) bool { + if m == nil || m.Epoch == "" || other.Epoch == "" { + return true + } + + return m.Epoch != other.Epoch || m.Index < other.Index +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go new file mode 100644 index 00000000..e1fe576e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go @@ -0,0 +1,419 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: modification_tag.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ModificationTag struct { + Epoch string `protobuf:"bytes,1,opt,name=epoch,proto3" json:"epoch"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` +} + +func (m *ModificationTag) Reset() { *m = ModificationTag{} } +func (*ModificationTag) ProtoMessage() {} +func (*ModificationTag) Descriptor() ([]byte, []int) { + return fileDescriptor_b84c9c806e96b4e3, []int{0} +} +func (m *ModificationTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ModificationTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ModificationTag.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ModificationTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModificationTag.Merge(m, src) +} +func (m *ModificationTag) XXX_Size() int { + return m.Size() +} +func (m *ModificationTag) XXX_DiscardUnknown() { + xxx_messageInfo_ModificationTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ModificationTag proto.InternalMessageInfo + +func (m *ModificationTag) GetEpoch() string { + if m != nil { + return m.Epoch + } + return "" +} + +func (m *ModificationTag) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func init() { + proto.RegisterType((*ModificationTag)(nil), "models.ModificationTag") +} + +func init() { proto.RegisterFile("modification_tag.proto", fileDescriptor_b84c9c806e96b4e3) } + +var fileDescriptor_b84c9c806e96b4e3 = []byte{ + // 203 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcb, 0xcd, 0x4f, 0xc9, + 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x49, 0x4c, 0xd7, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, + 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x14, 0xcc, 0xc5, 0xef, 0x8b, 0x64, 0x60, + 0x48, 0x62, 0xba, 0x90, 0x3c, 0x17, 0x6b, 0x6a, 0x41, 0x7e, 0x72, 0x86, 0x04, 0xa3, 0x02, 0xa3, + 0x06, 0xa7, 0x13, 0xe7, 0xab, 0x7b, 0xf2, 0x10, 0x81, 0x20, 0x08, 0x05, 0x52, 0x90, 0x99, 0x97, + 0x92, 0x5a, 0x21, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x0b, 0x51, 0x00, 0x16, 0x08, 0x82, 0x50, 0x4e, + 0x26, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, + 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, + 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, + 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x2e, 0x32, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x12, 0xa6, 0xaa, 0xa3, 0xe2, 0x00, 0x00, 0x00, +} + +func (this *ModificationTag) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ModificationTag) + if !ok { + that2, ok := that.(ModificationTag) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Epoch != that1.Epoch { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *ModificationTag) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ModificationTag{") + s = append(s, "Epoch: "+fmt.Sprintf("%#v", this.Epoch)+",\n") + s = append(s, "Index: 
"+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringModificationTag(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ModificationTag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ModificationTag) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ModificationTag) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintModificationTag(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.Epoch) > 0 { + i -= len(m.Epoch) + copy(dAtA[i:], m.Epoch) + i = encodeVarintModificationTag(dAtA, i, uint64(len(m.Epoch))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintModificationTag(dAtA []byte, offset int, v uint64) int { + offset -= sovModificationTag(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ModificationTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Epoch) + if l > 0 { + n += 1 + l + sovModificationTag(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovModificationTag(uint64(m.Index)) + } + return n +} + +func sovModificationTag(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozModificationTag(x uint64) (n int) { + return sovModificationTag(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ModificationTag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ModificationTag{`, + 
`Epoch:` + fmt.Sprintf("%v", this.Epoch) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func valueToStringModificationTag(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ModificationTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModificationTag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ModificationTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ModificationTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModificationTag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthModificationTag + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthModificationTag + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epoch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipModificationTag(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthModificationTag + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipModificationTag(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthModificationTag + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupModificationTag + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthModificationTag + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthModificationTag = fmt.Errorf("proto: negative length found during 
unmarshaling") + ErrIntOverflowModificationTag = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupModificationTag = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto new file mode 100644 index 00000000..0b413b54 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message ModificationTag { + string epoch = 1 [(gogoproto.jsontag) = "epoch"]; + uint32 index = 2 [(gogoproto.jsontag) = "index"]; +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/network.pb.go b/vendor/code.cloudfoundry.org/bbs/models/network.pb.go new file mode 100644 index 00000000..6bd2eb70 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/network.pb.go @@ -0,0 +1,522 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: network.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Network struct { + Properties map[string]string `protobuf:"bytes,1,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Network) Reset() { *m = Network{} } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { + return fileDescriptor_8571034d60397816, []int{0} +} +func (m *Network) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Network) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Network.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Network) XXX_Merge(src proto.Message) { + xxx_messageInfo_Network.Merge(m, src) +} +func (m *Network) XXX_Size() int { + return m.Size() +} +func (m *Network) XXX_DiscardUnknown() { + xxx_messageInfo_Network.DiscardUnknown(m) +} + +var xxx_messageInfo_Network proto.InternalMessageInfo + +func (m *Network) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func init() { + proto.RegisterType((*Network)(nil), "models.Network") + proto.RegisterMapType((map[string]string)(nil), "models.Network.PropertiesEntry") +} + +func init() { proto.RegisterFile("network.proto", fileDescriptor_8571034d60397816) } + +var fileDescriptor_8571034d60397816 = []byte{ + // 247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x4b, 0x2d, 0x29, + 0xcf, 0x2f, 0xca, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, + 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, + 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 
0xd1, 0xa6, + 0xb4, 0x98, 0x91, 0x8b, 0xdd, 0x0f, 0x62, 0x90, 0x50, 0x24, 0x17, 0x57, 0x41, 0x51, 0x7e, 0x41, + 0x6a, 0x51, 0x49, 0x66, 0x6a, 0xb1, 0x04, 0xa3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xbc, 0x1e, 0xc4, + 0x5c, 0x3d, 0xa8, 0x22, 0xbd, 0x00, 0xb8, 0x0a, 0xd7, 0xbc, 0x92, 0xa2, 0x4a, 0x27, 0x89, 0x57, + 0xf7, 0xe4, 0x45, 0x10, 0xda, 0x74, 0xf2, 0x73, 0x33, 0x4b, 0x52, 0x73, 0x0b, 0x4a, 0x2a, 0x83, + 0x90, 0x0c, 0x93, 0xb2, 0xe5, 0xe2, 0x47, 0xd3, 0x28, 0x24, 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, + 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, + 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, 0x46, 0x27, 0x93, 0x0b, 0x0f, 0xe5, + 0x18, 0x6f, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, + 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, + 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, + 0x8d, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x2f, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x38, + 0x3c, 0x9d, 0x4b, 0x2a, 0x01, 0x00, 0x00, +} + +func (this *Network) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Network) + if !ok { + that2, ok := that.(Network) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Properties) != len(that1.Properties) { + return false + } + for i := range this.Properties { + if this.Properties[i] != that1.Properties[i] { + return false + } + } + return true +} +func (this *Network) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.Network{") + keysForProperties := make([]string, 0, len(this.Properties)) + for k, _ := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]string{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%#v: %#v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + if this.Properties != nil { + s = append(s, "Properties: "+mapStringForProperties+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNetwork(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Network) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Network) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Network) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Properties) > 0 { + for k := range m.Properties { + v := m.Properties[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintNetwork(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintNetwork(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintNetwork(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintNetwork(dAtA []byte, offset int, v uint64) int { + offset -= sovNetwork(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Network) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + mapEntrySize := 1 + len(k) + 
sovNetwork(uint64(len(k))) + 1 + len(v) + sovNetwork(uint64(len(v))) + n += mapEntrySize + 1 + sovNetwork(uint64(mapEntrySize)) + } + } + return n +} + +func sovNetwork(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNetwork(x uint64) (n int) { + return sovNetwork(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Network) String() string { + if this == nil { + return "nil" + } + keysForProperties := make([]string, 0, len(this.Properties)) + for k, _ := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]string{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + s := strings.Join([]string{`&Network{`, + `Properties:` + mapStringForProperties + `,`, + `}`, + }, "") + return s +} +func valueToStringNetwork(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Network) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Network: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNetwork + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNetwork + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNetwork + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNetwork + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNetwork + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthNetwork + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNetwork(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthNetwork + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetwork(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthNetwork + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNetwork(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetwork + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetwork + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetwork + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthNetwork + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 
{ + return 0, ErrUnexpectedEndOfGroupNetwork + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthNetwork + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthNetwork = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNetwork = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupNetwork = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/network.proto b/vendor/code.cloudfoundry.org/bbs/models/network.proto new file mode 100644 index 00000000..95185656 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/network.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message Network { + map properties = 1 [(gogoproto.jsontag) = "properties,omitempty"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/package.go b/vendor/code.cloudfoundry.org/bbs/models/package.go new file mode 100644 index 00000000..ab6aeab0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/package.go @@ -0,0 +1 @@ +package models // import "code.cloudfoundry.org/bbs/models" diff --git a/vendor/code.cloudfoundry.org/bbs/models/ping.pb.go b/vendor/code.cloudfoundry.org/bbs/models/ping.pb.go new file mode 100644 index 00000000..6acb0b54 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/ping.pb.go @@ -0,0 +1,368 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ping.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PingResponse struct { + Available bool `protobuf:"varint,1,opt,name=available,proto3" json:"available"` +} + +func (m *PingResponse) Reset() { *m = PingResponse{} } +func (*PingResponse) ProtoMessage() {} +func (*PingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6d51d96c3ad891f5, []int{0} +} +func (m *PingResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingResponse.Merge(m, src) +} +func (m *PingResponse) XXX_Size() int { + return m.Size() +} +func (m *PingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PingResponse proto.InternalMessageInfo + +func (m *PingResponse) GetAvailable() bool { + if m != nil { + return m.Available + } + return false +} + +func init() { + proto.RegisterType((*PingResponse)(nil), "models.PingResponse") +} + +func init() { proto.RegisterFile("ping.proto", fileDescriptor_6d51d96c3ad891f5) } + +var fileDescriptor_6d51d96c3ad891f5 = []byte{ + // 181 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b, + 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, + 
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, + 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x64, 0xcd, 0xc5, + 0x13, 0x90, 0x99, 0x97, 0x1e, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0xa4, 0xcd, 0xc5, + 0x99, 0x58, 0x96, 0x98, 0x99, 0x93, 0x98, 0x94, 0x93, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe1, + 0xc4, 0xfb, 0xea, 0x9e, 0x3c, 0x42, 0x30, 0x08, 0xc1, 0x74, 0x32, 0xb9, 0xf0, 0x50, 0x8e, 0xe1, + 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, + 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, + 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, + 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0xb6, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x41, 0x03, + 0x47, 0xbe, 0x00, 0x00, 0x00, +} + +func (this *PingResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PingResponse) + if !ok { + that2, ok := that.(PingResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Available != that1.Available { + return false + } + return true +} +func (this *PingResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.PingResponse{") + s = append(s, "Available: "+fmt.Sprintf("%#v", this.Available)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringPing(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Available { + i-- + if m.Available { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPing(dAtA []byte, offset int, v uint64) int { + offset -= sovPing(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Available { + n += 2 + } + return n +} + +func sovPing(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPing(x uint64) (n int) { + return sovPing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PingResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PingResponse{`, + `Available:` + fmt.Sprintf("%v", this.Available) + `,`, + `}`, + }, "") + return s +} +func valueToStringPing(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PingResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Available", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Available = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPing(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPing + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPing(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPing + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPing + } + depth-- + case 5: + iNdEx += 4 + default: + return 
0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPing + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPing = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPing = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPing = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/ping.proto b/vendor/code.cloudfoundry.org/bbs/models/ping.proto new file mode 100644 index 00000000..2841864f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/ping.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message PingResponse { + bool available = 1 [(gogoproto.jsontag) = "available"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go b/vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go new file mode 100644 index 00000000..1246dbcc --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go @@ -0,0 +1,85 @@ +package models + +import ( + "fmt" + "math" + "time" +) + +const DefaultImmediateRestarts = 3 +const DefaultMaxBackoffDuration = 16 * time.Minute +const DefaultMaxRestarts = 200 + +const CrashBackoffMinDuration = 30 * time.Second + +func exponentialBackoff(exponent, max int32) time.Duration { + if exponent > max { + exponent = max + } + return CrashBackoffMinDuration * time.Duration(powerOfTwo(exponent)) +} + +func powerOfTwo(pow int32) int32 { + if pow < 0 { + panic("pow cannot be negative") + } + return 1 << uint(pow) +} + +func calculateMaxBackoffCount(maxDuration time.Duration) int32 { + total := math.Ceil(float64(maxDuration) / float64(CrashBackoffMinDuration)) + return int32(math.Logb(total)) +} + +type RestartCalculator struct { + ImmediateRestarts int32 `json:"immediate_restarts"` + MaxBackoffCount int32 
`json:"max_backoff_count"` + MaxBackoffDuration time.Duration `json:"max_backoff_duration"` + MaxRestartAttempts int32 `json:"max_restart_attempts"` +} + +func NewDefaultRestartCalculator() RestartCalculator { + return NewRestartCalculator(DefaultImmediateRestarts, DefaultMaxBackoffDuration, DefaultMaxRestarts) +} + +func NewRestartCalculator(immediateRestarts int32, maxBackoffDuration time.Duration, maxRestarts int32) RestartCalculator { + return RestartCalculator{ + ImmediateRestarts: immediateRestarts, + MaxBackoffDuration: maxBackoffDuration, + MaxBackoffCount: calculateMaxBackoffCount(maxBackoffDuration), + MaxRestartAttempts: maxRestarts, + } +} + +func (r RestartCalculator) Validate() error { + var validationError ValidationError + if r.MaxBackoffDuration < CrashBackoffMinDuration { + err := fmt.Errorf("MaxBackoffDuration '%s' must be larger than CrashBackoffMinDuration '%s'", r.MaxBackoffDuration, CrashBackoffMinDuration) + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (r RestartCalculator) ShouldRestart(now, crashedAt int64, crashCount int32) bool { + switch { + case crashCount < r.ImmediateRestarts: + return true + + case crashCount < r.MaxRestartAttempts: + backoffDuration := exponentialBackoff(crashCount-r.ImmediateRestarts, r.MaxBackoffCount) + if backoffDuration > r.MaxBackoffDuration { + backoffDuration = r.MaxBackoffDuration + } + nextRestartTime := crashedAt + backoffDuration.Nanoseconds() + if nextRestartTime <= now { + return true + } + } + + return false +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/routes.go b/vendor/code.cloudfoundry.org/bbs/models/routes.go new file mode 100644 index 00000000..6097fe12 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/routes.go @@ -0,0 +1,77 @@ +package models + +import ( + "bytes" + "encoding/json" +) + +type Routes map[string]*json.RawMessage + +func (r *Routes) protoRoutes() *ProtoRoutes { + pr 
:= &ProtoRoutes{ + Routes: map[string][]byte{}, + } + + for k, v := range *r { + pr.Routes[k] = *v + } + + return pr +} + +func (r *Routes) Marshal() ([]byte, error) { + return r.protoRoutes().Marshal() +} + +func (r *Routes) MarshalTo(data []byte) (n int, err error) { + return r.protoRoutes().MarshalTo(data) +} + +func (r *Routes) Unmarshal(data []byte) error { + pr := &ProtoRoutes{} + err := pr.Unmarshal(data) + if err != nil { + return err + } + + if pr.Routes == nil { + return nil + } + + routes := map[string]*json.RawMessage{} + for k, v := range pr.Routes { + raw := json.RawMessage(v) + routes[k] = &raw + } + *r = routes + + return nil +} + +func (r *Routes) Size() int { + if r == nil { + return 0 + } + + return r.protoRoutes().Size() +} + +func (r *Routes) Equal(other Routes) bool { + for k, v := range *r { + if !bytes.Equal(*v, *other[k]) { + return false + } + } + return true +} + +func (r Routes) Validate() error { + totalRoutesLength := 0 + for _, value := range r { + totalRoutesLength += len(*value) + if totalRoutesLength > maximumRouteLength { + return ErrInvalidField{"routes"} + } + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go b/vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go new file mode 100644 index 00000000..87b40e03 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go @@ -0,0 +1,1282 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: security_group.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PortRange struct { + Start uint32 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` + End uint32 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` +} + +func (m *PortRange) Reset() { *m = PortRange{} } +func (*PortRange) ProtoMessage() {} +func (*PortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_ff465b8f55f128fd, []int{0} +} +func (m *PortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortRange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortRange.Merge(m, src) +} +func (m *PortRange) XXX_Size() int { + return m.Size() +} +func (m *PortRange) XXX_DiscardUnknown() { + xxx_messageInfo_PortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_PortRange proto.InternalMessageInfo + +func (m *PortRange) GetStart() uint32 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *PortRange) GetEnd() uint32 { + if m != nil { + return m.End + } + return 0 +} + +type ICMPInfo struct { + Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type"` + Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code"` +} + +func (m *ICMPInfo) Reset() { *m = ICMPInfo{} } +func (*ICMPInfo) ProtoMessage() {} +func (*ICMPInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ff465b8f55f128fd, []int{1} +} +func (m *ICMPInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ICMPInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ICMPInfo.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ICMPInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ICMPInfo.Merge(m, src) +} +func (m *ICMPInfo) XXX_Size() int { + return m.Size() +} +func (m *ICMPInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ICMPInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ICMPInfo proto.InternalMessageInfo + +func (m *ICMPInfo) GetType() int32 { + if m != nil { + return m.Type + } + return 0 +} + +func (m *ICMPInfo) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +type SecurityGroupRule struct { + Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"` + Destinations []string `protobuf:"bytes,2,rep,name=destinations,proto3" json:"destinations,omitempty"` + Ports []uint32 `protobuf:"varint,3,rep,name=ports,proto3" json:"ports,omitempty"` + PortRange *PortRange `protobuf:"bytes,4,opt,name=port_range,json=portRange,proto3" json:"port_range,omitempty"` + IcmpInfo *ICMPInfo `protobuf:"bytes,5,opt,name=icmp_info,json=icmpInfo,proto3" json:"icmp_info,omitempty"` + Log bool `protobuf:"varint,6,opt,name=log,proto3" json:"log"` + Annotations []string `protobuf:"bytes,7,rep,name=annotations,proto3" json:"annotations,omitempty"` +} + +func (m *SecurityGroupRule) Reset() { *m = SecurityGroupRule{} } +func (*SecurityGroupRule) ProtoMessage() {} +func (*SecurityGroupRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ff465b8f55f128fd, []int{2} +} +func (m *SecurityGroupRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityGroupRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SecurityGroupRule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SecurityGroupRule) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityGroupRule.Merge(m, src) +} +func (m *SecurityGroupRule) XXX_Size() int { + return m.Size() +} +func (m *SecurityGroupRule) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityGroupRule.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityGroupRule proto.InternalMessageInfo + +func (m *SecurityGroupRule) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func (m *SecurityGroupRule) GetDestinations() []string { + if m != nil { + return m.Destinations + } + return nil +} + +func (m *SecurityGroupRule) GetPorts() []uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *SecurityGroupRule) GetPortRange() *PortRange { + if m != nil { + return m.PortRange + } + return nil +} + +func (m *SecurityGroupRule) GetIcmpInfo() *ICMPInfo { + if m != nil { + return m.IcmpInfo + } + return nil +} + +func (m *SecurityGroupRule) GetLog() bool { + if m != nil { + return m.Log + } + return false +} + +func (m *SecurityGroupRule) GetAnnotations() []string { + if m != nil { + return m.Annotations + } + return nil +} + +func init() { + proto.RegisterType((*PortRange)(nil), "models.PortRange") + proto.RegisterType((*ICMPInfo)(nil), "models.ICMPInfo") + proto.RegisterType((*SecurityGroupRule)(nil), "models.SecurityGroupRule") +} + +func init() { proto.RegisterFile("security_group.proto", fileDescriptor_ff465b8f55f128fd) } + +var fileDescriptor_ff465b8f55f128fd = []byte{ + // 402 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xb1, 0x6e, 0xdb, 0x30, + 0x10, 0x86, 0x45, 0x2b, 0x76, 0x24, 0xa6, 0x01, 0x12, 0xa2, 0x03, 0x13, 0x14, 0x94, 0xa0, 0x49, + 0x4b, 0x94, 0xa2, 0xed, 0x13, 0xa8, 0x40, 0x83, 0x0c, 0x05, 0x02, 0xf6, 0x01, 0x0c, 0x59, 0xa2, + 0x55, 0x01, 0x32, 0x4f, 0x90, 0xa8, 0x21, 0x5b, 0xf7, 0x2e, 0x7d, 0x8c, 0x3e, 0x4a, 0x47, 0x8f, + 0x99, 0x8c, 0x5a, 0x5e, 0x0a, 0x4f, 0x79, 0x84, 0x82, 0x27, 0xc7, 0x68, 0x97, 0x13, 0xff, 
0xff, + 0xd7, 0x1d, 0xee, 0x3e, 0xfa, 0xba, 0x53, 0x79, 0xdf, 0x56, 0xe6, 0x71, 0x5e, 0xb6, 0xd0, 0x37, + 0x49, 0xd3, 0x82, 0x01, 0x36, 0x5b, 0x41, 0xa1, 0xea, 0xee, 0xfa, 0xa6, 0xac, 0xcc, 0xd7, 0x7e, + 0x91, 0xe4, 0xb0, 0xba, 0x2d, 0xa1, 0x84, 0x5b, 0x8c, 0x17, 0xfd, 0x12, 0x15, 0x0a, 0x7c, 0x8d, + 0x6d, 0xd1, 0x1d, 0xf5, 0x1f, 0xa0, 0x35, 0x32, 0xd3, 0xa5, 0x62, 0x01, 0x9d, 0x76, 0x26, 0x6b, + 0x0d, 0x27, 0x21, 0x89, 0xcf, 0x53, 0x7f, 0xbf, 0x09, 0x46, 0x43, 0x8e, 0x1f, 0x76, 0x45, 0x5d, + 0xa5, 0x0b, 0x3e, 0xc1, 0xf8, 0x74, 0xbf, 0x09, 0xac, 0x94, 0xb6, 0x44, 0x9f, 0xa8, 0x77, 0xff, + 0xf1, 0xf3, 0xc3, 0xbd, 0x5e, 0x02, 0x7b, 0x43, 0x4f, 0xcc, 0x63, 0xa3, 0x70, 0xcc, 0x34, 0xf5, + 0xf6, 0x9b, 0x00, 0xb5, 0xc4, 0x6a, 0xd3, 0x1c, 0x0a, 0x85, 0x53, 0x0e, 0xa9, 0xd5, 0x12, 0x6b, + 0xf4, 0x7d, 0x42, 0x2f, 0xbf, 0x1c, 0x0e, 0xbc, 0xb3, 0xf7, 0xc9, 0xbe, 0x56, 0xec, 0x9a, 0x7a, + 0xb8, 0x6f, 0x0e, 0x35, 0x4e, 0xf5, 0xe5, 0x51, 0xb3, 0x88, 0xbe, 0x2a, 0x54, 0x67, 0x2a, 0x9d, + 0x99, 0x0a, 0x74, 0xc7, 0x27, 0xa1, 0x1b, 0xfb, 0xf2, 0x3f, 0x8f, 0x71, 0x3a, 0x6d, 0xa0, 0x35, + 0x1d, 0x77, 0x43, 0x37, 0x3e, 0x4f, 0x27, 0x17, 0x8e, 0x1c, 0x0d, 0xf6, 0x96, 0x52, 0xfb, 0x98, + 0xb7, 0x96, 0x00, 0x3f, 0x09, 0x49, 0x7c, 0xf6, 0xee, 0x32, 0x19, 0x61, 0x26, 0x47, 0x34, 0xd2, + 0x6f, 0x8e, 0x94, 0x6e, 0xa8, 0x5f, 0xe5, 0xab, 0x66, 0x5e, 0xe9, 0x25, 0xf0, 0x29, 0x36, 0x5c, + 0xbc, 0x34, 0xbc, 0x20, 0x90, 0x9e, 0xfd, 0x05, 0x61, 0x5c, 0x51, 0xb7, 0x86, 0x92, 0xcf, 0x42, + 0x12, 0x7b, 0x23, 0xb3, 0x1a, 0x4a, 0x69, 0x0b, 0x0b, 0xe9, 0x59, 0xa6, 0x35, 0x98, 0xc3, 0xe2, + 0xa7, 0xb8, 0xf8, 0xbf, 0x56, 0xfa, 0x61, 0xbd, 0x15, 0xce, 0xd3, 0x56, 0x38, 0xcf, 0x5b, 0x41, + 0xbe, 0x0d, 0x82, 0xfc, 0x1c, 0x04, 0xf9, 0x35, 0x08, 0xb2, 0x1e, 0x04, 0xf9, 0x3d, 0x08, 0xf2, + 0x67, 0x10, 0xce, 0xf3, 0x20, 0xc8, 0x8f, 0x9d, 0x70, 0xd6, 0x3b, 0xe1, 0x3c, 0xed, 0x84, 0xb3, + 0x98, 0x21, 0x9b, 0xf7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xd0, 0x92, 0x85, 0x2a, 0x02, + 0x00, 0x00, +} + +func (this 
*PortRange) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PortRange) + if !ok { + that2, ok := that.(PortRange) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Start != that1.Start { + return false + } + if this.End != that1.End { + return false + } + return true +} +func (this *ICMPInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ICMPInfo) + if !ok { + that2, ok := that.(ICMPInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Code != that1.Code { + return false + } + return true +} +func (this *SecurityGroupRule) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SecurityGroupRule) + if !ok { + that2, ok := that.(SecurityGroupRule) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Protocol != that1.Protocol { + return false + } + if len(this.Destinations) != len(that1.Destinations) { + return false + } + for i := range this.Destinations { + if this.Destinations[i] != that1.Destinations[i] { + return false + } + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if this.Ports[i] != that1.Ports[i] { + return false + } + } + if !this.PortRange.Equal(that1.PortRange) { + return false + } + if !this.IcmpInfo.Equal(that1.IcmpInfo) { + return false + } + if this.Log != that1.Log { + return false + } + if len(this.Annotations) != len(that1.Annotations) { + return false + } + for i := range this.Annotations { + if this.Annotations[i] != that1.Annotations[i] { + return false + } + } + return true +} +func 
(this *PortRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.PortRange{") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ICMPInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ICMPInfo{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SecurityGroupRule) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.SecurityGroupRule{") + s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n") + s = append(s, "Destinations: "+fmt.Sprintf("%#v", this.Destinations)+",\n") + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + if this.PortRange != nil { + s = append(s, "PortRange: "+fmt.Sprintf("%#v", this.PortRange)+",\n") + } + if this.IcmpInfo != nil { + s = append(s, "IcmpInfo: "+fmt.Sprintf("%#v", this.IcmpInfo)+",\n") + } + s = append(s, "Log: "+fmt.Sprintf("%#v", this.Log)+",\n") + s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSecurityGroup(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *PortRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.End != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ICMPInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ICMPInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ICMPInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Code != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SecurityGroupRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityGroupRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityGroupRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Annotations[iNdEx]) + copy(dAtA[i:], m.Annotations[iNdEx]) + i = encodeVarintSecurityGroup(dAtA, i, uint64(len(m.Annotations[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.Log { + i-- + if m.Log { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.IcmpInfo != nil { + { + size, err := 
m.IcmpInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSecurityGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.PortRange != nil { + { + size, err := m.PortRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSecurityGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + if len(m.Destinations) > 0 { + for iNdEx := len(m.Destinations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Destinations[iNdEx]) + copy(dAtA[i:], m.Destinations[iNdEx]) + i = encodeVarintSecurityGroup(dAtA, i, uint64(len(m.Destinations[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Protocol) > 0 { + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintSecurityGroup(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSecurityGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovSecurityGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PortRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sovSecurityGroup(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovSecurityGroup(uint64(m.End)) + } + return n +} + +func (m *ICMPInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSecurityGroup(uint64(m.Type)) + } + if m.Code != 0 { + n += 1 + sovSecurityGroup(uint64(m.Code)) + } + return n +} + +func (m *SecurityGroupRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Protocol) + if l > 0 { + n += 1 + l + sovSecurityGroup(uint64(l)) + } + if len(m.Destinations) > 0 { + for _, s 
:= range m.Destinations { + l = len(s) + n += 1 + l + sovSecurityGroup(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovSecurityGroup(uint64(e)) + } + } + if m.PortRange != nil { + l = m.PortRange.Size() + n += 1 + l + sovSecurityGroup(uint64(l)) + } + if m.IcmpInfo != nil { + l = m.IcmpInfo.Size() + n += 1 + l + sovSecurityGroup(uint64(l)) + } + if m.Log { + n += 2 + } + if len(m.Annotations) > 0 { + for _, s := range m.Annotations { + l = len(s) + n += 1 + l + sovSecurityGroup(uint64(l)) + } + } + return n +} + +func sovSecurityGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSecurityGroup(x uint64) (n int) { + return sovSecurityGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortRange{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `}`, + }, "") + return s +} +func (this *ICMPInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ICMPInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityGroupRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityGroupRule{`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Destinations:` + fmt.Sprintf("%v", this.Destinations) + `,`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `PortRange:` + strings.Replace(this.PortRange.String(), "PortRange", "PortRange", 1) + `,`, + `IcmpInfo:` + strings.Replace(this.IcmpInfo.String(), "ICMPInfo", "ICMPInfo", 1) + `,`, + `Log:` + fmt.Sprintf("%v", this.Log) + `,`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `}`, + }, "") + return s +} +func valueToStringSecurityGroup(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + 
return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PortRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSecurityGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSecurityGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ICMPInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ICMPInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ICMPInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSecurityGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSecurityGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityGroupRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityGroupRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityGroupRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Destinations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Destinations = append(m.Destinations, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v 
|= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortRange == nil { + m.PortRange = &PortRange{} + } + if err := m.PortRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IcmpInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IcmpInfo == nil { + m.IcmpInfo = &ICMPInfo{} + } + if err := m.IcmpInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Log = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotations = append(m.Annotations, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSecurityGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSecurityGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSecurityGroup(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSecurityGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSecurityGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSecurityGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSecurityGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSecurityGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSecurityGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/security_group.proto 
b/vendor/code.cloudfoundry.org/bbs/models/security_group.proto new file mode 100644 index 00000000..ad28c29c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/security_group.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message PortRange { + uint32 start = 1 [(gogoproto.jsontag) = "start"]; + uint32 end = 2 [(gogoproto.jsontag) = "end"]; +} + +message ICMPInfo { + int32 type = 1 [(gogoproto.jsontag) = "type"]; + int32 code = 2 [(gogoproto.jsontag) = "code"]; +} + +message SecurityGroupRule { + string protocol = 1; + repeated string destinations = 2; + repeated uint32 ports = 3 [packed = false]; + PortRange port_range = 4; + ICMPInfo icmp_info = 5; + bool log = 6 [(gogoproto.jsontag) = "log"]; + repeated string annotations = 7; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/security_groups.go b/vendor/code.cloudfoundry.org/bbs/models/security_groups.go new file mode 100644 index 00000000..8ea22619 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/security_groups.go @@ -0,0 +1,157 @@ +package models + +import ( + "errors" + "net" + "strings" +) + +const ( + TCPProtocol = "tcp" + UDPProtocol = "udp" + ICMPProtocol = "icmp" + AllProtocol = "all" +) + +var errInvalidIP = errors.New("Invalid IP") + +func (rule SecurityGroupRule) Validate() error { + var validationError ValidationError + + switch rule.GetProtocol() { + case TCPProtocol: + validationError = rule.validatePorts() + if rule.IcmpInfo != nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + case UDPProtocol: + validationError = rule.validatePorts() + if rule.IcmpInfo != nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + case ICMPProtocol: + if rule.PortRange != nil { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.Ports != nil { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + 
if rule.IcmpInfo == nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + case AllProtocol: + if rule.PortRange != nil { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.Ports != nil { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + if rule.IcmpInfo != nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + default: + validationError = validationError.Append(ErrInvalidField{"protocol"}) + } + + if err := rule.validateDestinations(); err != nil { + validationError = validationError.Append(ErrInvalidField{"destinations [ " + err.Error() + " ]"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (rule SecurityGroupRule) validatePorts() ValidationError { + var validationError ValidationError + + if rule.PortRange == nil && rule.Ports == nil { + return validationError.Append(errors.New("Missing required field: ports or port_range")) + } + + if rule.PortRange != nil && rule.Ports != nil { + return validationError.Append(errors.New("Invalid: ports and port_range provided")) + } + + if rule.PortRange != nil { + if rule.GetPortRange().GetStart() < 1 { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.GetPortRange().GetEnd() < 1 { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.GetPortRange().GetStart() > rule.GetPortRange().GetEnd() { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + } + + if rule.Ports != nil { + if len(rule.Ports) == 0 { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + + for _, p := range rule.Ports { + if p < 1 { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + } + } + + return validationError +} + +func (rule SecurityGroupRule) validateDestinations() error { + if len(rule.Destinations) == 0 { + return errors.New("Must have at 
least 1 destination") + } + + var validationError ValidationError + + var destinations []string + for _, d := range rule.Destinations { + destinations = append(destinations, strings.Split(d, ",")...) + } + + for _, d := range destinations { + n := strings.IndexAny(d, "-/") + if n == -1 { + if net.ParseIP(d) == nil { + validationError = validationError.Append(errInvalidIP) + continue + } + } else if d[n] == '/' { + _, _, err := net.ParseCIDR(d) + if err != nil { + validationError = validationError.Append(err) + continue + } + } else { + firstIP := net.ParseIP(d[:n]) + secondIP := net.ParseIP(d[n+1:]) + if firstIP == nil || secondIP == nil { + validationError = validationError.Append(errInvalidIP) + continue + } + for i, b := range firstIP { + if b < secondIP[i] { + break + } + + if b == secondIP[i] { + continue + } + + validationError = validationError.Append(errInvalidIP) + continue + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/sidecar.go b/vendor/code.cloudfoundry.org/bbs/models/sidecar.go new file mode 100644 index 00000000..4f8ce3ab --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/sidecar.go @@ -0,0 +1,32 @@ +package models + +func (s Sidecar) Validate() error { + var validationError ValidationError + + if s.Action == nil { + validationError = validationError.Append(ErrInvalidActionType) + } else if err := s.Action.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + validationError = validationError.Append(err) + } + + if s.GetMemoryMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if s.GetDiskMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + return validationError +} + +func validateSidecars(sidecars []*Sidecar) ValidationError { + var validationError ValidationError + + for _, s := range sidecars { + validationError = 
validationError.Check(s) + } + + return validationError +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go b/vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go new file mode 100644 index 00000000..3a9f6ec1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go @@ -0,0 +1,472 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: sidecar.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Sidecar struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + DiskMb int32 `protobuf:"varint,2,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + MemoryMb int32 `protobuf:"varint,3,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` +} + +func (m *Sidecar) Reset() { *m = Sidecar{} } +func (*Sidecar) ProtoMessage() {} +func (*Sidecar) Descriptor() ([]byte, []int) { + return fileDescriptor_179ad3b13e6397ec, []int{0} +} +func (m *Sidecar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sidecar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sidecar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sidecar) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Sidecar.Merge(m, src) +} +func (m *Sidecar) XXX_Size() int { + return m.Size() +} +func (m *Sidecar) XXX_DiscardUnknown() { + xxx_messageInfo_Sidecar.DiscardUnknown(m) +} + +var xxx_messageInfo_Sidecar proto.InternalMessageInfo + +func (m *Sidecar) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *Sidecar) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *Sidecar) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func init() { + proto.RegisterType((*Sidecar)(nil), "models.Sidecar") +} + +func init() { proto.RegisterFile("sidecar.proto", fileDescriptor_179ad3b13e6397ec) } + +var fileDescriptor_179ad3b13e6397ec = []byte{ + // 239 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0xce, 0x4c, 0x49, + 0x4d, 0x4e, 0x2c, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, + 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, + 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0x26, + 0xc5, 0x9b, 0x98, 0x5c, 0x92, 0x99, 0x9f, 0x57, 0x0c, 0xe1, 0x2a, 0x35, 0x33, 0x72, 0xb1, 0x07, + 0x43, 0xcc, 0x15, 0x52, 0xe3, 0x62, 0x83, 0x48, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0xf1, + 0xe9, 0x41, 0xac, 0xd0, 0x73, 0x04, 0x8b, 0x06, 0x41, 0x65, 0x85, 0x54, 0xb8, 0xd8, 0x53, 0x32, + 0x8b, 0xb3, 0xe3, 0x73, 0x93, 0x24, 0x98, 0x14, 0x18, 0x35, 0x58, 0x9d, 0xb8, 0x5f, 0xdd, 0x93, + 0x87, 0x09, 0x05, 0xb1, 0x81, 0x18, 0xbe, 0x49, 0x42, 0x5a, 0x5c, 0x9c, 0xb9, 0xa9, 0xb9, 0xf9, + 0x45, 0x95, 0x20, 0x75, 0xcc, 0x60, 0x75, 0xbc, 0xaf, 0xee, 0xc9, 0x23, 0x04, 0x83, 0x38, 0x20, + 0x4c, 0xdf, 0x24, 0x27, 0x93, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, + 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, + 0x24, 0xc7, 0xf8, 0xe0, 0x91, 
0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, + 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x2f, + 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x28, 0x34, 0x4f, 0x19, 0x01, 0x00, 0x00, +} + +func (this *Sidecar) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sidecar) + if !ok { + that2, ok := that.(Sidecar) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + return true +} +func (this *Sidecar) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.Sidecar{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSidecar(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Sidecar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sidecar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sidecar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MemoryMb != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 
0x18 + } + if m.DiskMb != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x10 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSidecar(dAtA []byte, offset int, v uint64) int { + offset -= sovSidecar(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Sidecar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + if m.DiskMb != 0 { + n += 1 + sovSidecar(uint64(m.DiskMb)) + } + if m.MemoryMb != 0 { + n += 1 + sovSidecar(uint64(m.MemoryMb)) + } + return n +} + +func sovSidecar(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSidecar(x uint64) (n int) { + return sovSidecar(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Sidecar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sidecar{`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `}`, + }, "") + return s +} +func valueToStringSidecar(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Sidecar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sidecar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sidecar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSidecar(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSidecar + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSidecar + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSidecar + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSidecar = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSidecar = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSidecar = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/sidecar.proto b/vendor/code.cloudfoundry.org/bbs/models/sidecar.proto new file mode 100644 index 00000000..baa13bcb --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/sidecar.proto @@ -0,0 +1,13 @@ +syntax = 
"proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actions.proto"; + +message Sidecar { + Action action = 1; + + int32 disk_mb = 2 [(gogoproto.jsontag) = "disk_mb"]; + int32 memory_mb = 3 [(gogoproto.jsontag) = "memory_mb"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/task.go b/vendor/code.cloudfoundry.org/bbs/models/task.go new file mode 100644 index 00000000..2319fe99 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task.go @@ -0,0 +1,206 @@ +package models + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "code.cloudfoundry.org/bbs/format" + "code.cloudfoundry.org/lager/v3" +) + +var taskGuidPattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +type TaskChange struct { + Before *Task + After *Task +} + +type TaskFilter struct { + Domain string + CellID string +} + +func (t *Task) LagerData() lager.Data { + return lager.Data{ + "task_guid": t.TaskGuid, + "domain": t.Domain, + "state": t.State, + "cell_id": t.CellId, + } +} + +func (task *Task) Validate() error { + var validationError ValidationError + + if task.Domain == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if !taskGuidPattern.MatchString(task.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if task.TaskDefinition == nil { + validationError = validationError.Append(ErrInvalidField{"task_definition"}) + } else if defErr := task.TaskDefinition.Validate(); defErr != nil { + validationError = validationError.Append(defErr) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (t *Task) Copy() *Task { + newTask := *t + newTask.TaskDefinition = t.TaskDefinition.Copy() + return &newTask +} + +func (t *Task) ValidateTransitionTo(to Task_State) error { + var valid bool + from := t.State + switch to { + case Task_Running: + valid = from == Task_Pending + case Task_Completed: + valid = from == Task_Running + case 
Task_Resolving: + valid = from == Task_Completed + } + + if !valid { + return NewError( + Error_InvalidStateTransition, + fmt.Sprintf("Cannot transition from %s to %s", from.String(), to.String()), + ) + } + + return nil +} + +func (t *TaskDefinition) Copy() *TaskDefinition { + if t == nil { + return &TaskDefinition{} + } + newTaskDef := *t + return &newTaskDef +} + +func (def *TaskDefinition) Validate() error { + var validationError ValidationError + + if def.RootFs == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } else { + rootFsURL, err := url.Parse(def.RootFs) + if err != nil || rootFsURL.Scheme == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } + } + + if def.Action == nil { + validationError = validationError.Append(ErrInvalidActionType) + } else if err := def.Action.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + validationError = validationError.Append(err) + } + + if def.MemoryMb < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if def.DiskMb < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if limit := def.LogRateLimit; limit != nil { + if limit.BytesPerSecond < -1 { + validationError = validationError.Append(ErrInvalidField{"log_rate_limit"}) + } + } + + if def.MaxPids < 0 { + validationError = validationError.Append(ErrInvalidField{"max_pids"}) + } + + if len(def.Annotation) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + for _, rule := range def.EgressRules { + err := rule.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"egress_rules"}) + } + } + + if def.ImageUsername == "" && def.ImagePassword != "" { + validationError = validationError.Append(ErrInvalidField{"image_username"}) + } + + if def.ImageUsername != "" && def.ImagePassword == "" { + validationError = 
validationError.Append(ErrInvalidField{"image_password"}) + } + + err := validateCachedDependencies(def.CachedDependencies) + if err != nil { + validationError = validationError.Append(err) + } + + err = validateImageLayers(def.ImageLayers, def.LegacyDownloadUser) + if err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func downgradeTaskDefinitionV3ToV2(t *TaskDefinition) *TaskDefinition { + layers := ImageLayers(t.ImageLayers) + + t.CachedDependencies = append(layers.ToCachedDependencies(), t.CachedDependencies...) + t.Action = layers.ToDownloadActions(t.LegacyDownloadUser, t.Action) + t.ImageLayers = nil + + return t +} + +func (t *Task) VersionDownTo(v format.Version) *Task { + t = t.Copy() + + if v < t.Version() { + t.TaskDefinition = downgradeTaskDefinitionV3ToV2(t.TaskDefinition) + } + + return t +} + +func (t *Task) Version() format.Version { + return format.V3 +} + +func (s *Task_State) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := Task_State_value[name]; found { + *s = Task_State(v) + return nil + } + return fmt.Errorf("invalid state: %s", name) +} + +func (s Task_State) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/task.pb.go b/vendor/code.cloudfoundry.org/bbs/models/task.pb.go new file mode 100644 index 00000000..a7e40bd3 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task.pb.go @@ -0,0 +1,3101 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: task.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Task_State int32 + +const ( + Task_Invalid Task_State = 0 + Task_Pending Task_State = 1 + Task_Running Task_State = 2 + Task_Completed Task_State = 3 + Task_Resolving Task_State = 4 +) + +var Task_State_name = map[int32]string{ + 0: "Invalid", + 1: "Pending", + 2: "Running", + 3: "Completed", + 4: "Resolving", +} + +var Task_State_value = map[string]int32{ + "Invalid": 0, + "Pending": 1, + "Running": 2, + "Completed": 3, + "Resolving": 4, +} + +func (Task_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ce5d8dd45b4a91ff, []int{1, 0} +} + +type TaskDefinition struct { + RootFs string `protobuf:"bytes,1,opt,name=root_fs,json=rootFs,proto3" json:"rootfs"` + EnvironmentVariables []*EnvironmentVariable `protobuf:"bytes,2,rep,name=environment_variables,json=environmentVariables,proto3" json:"env,omitempty"` + Action *Action `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` + DiskMb int32 `protobuf:"varint,4,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + MemoryMb int32 `protobuf:"varint,5,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + CpuWeight uint32 `protobuf:"varint,6,opt,name=cpu_weight,json=cpuWeight,proto3" 
json:"cpu_weight"` + Privileged bool `protobuf:"varint,7,opt,name=privileged,proto3" json:"privileged"` + LogSource string `protobuf:"bytes,8,opt,name=log_source,json=logSource,proto3" json:"log_source"` + LogGuid string `protobuf:"bytes,9,opt,name=log_guid,json=logGuid,proto3" json:"log_guid"` + MetricsGuid string `protobuf:"bytes,10,opt,name=metrics_guid,json=metricsGuid,proto3" json:"metrics_guid"` + ResultFile string `protobuf:"bytes,11,opt,name=result_file,json=resultFile,proto3" json:"result_file"` + CompletionCallbackUrl string `protobuf:"bytes,12,opt,name=completion_callback_url,json=completionCallbackUrl,proto3" json:"completion_callback_url,omitempty"` + Annotation string `protobuf:"bytes,13,opt,name=annotation,proto3" json:"annotation,omitempty"` + EgressRules []*SecurityGroupRule `protobuf:"bytes,14,rep,name=egress_rules,json=egressRules,proto3" json:"egress_rules,omitempty"` + CachedDependencies []*CachedDependency `protobuf:"bytes,15,rep,name=cached_dependencies,json=cachedDependencies,proto3" json:"cached_dependencies,omitempty"` + LegacyDownloadUser string `protobuf:"bytes,16,opt,name=legacy_download_user,json=legacyDownloadUser,proto3" json:"legacy_download_user,omitempty"` // Deprecated: Do not use. 
+ TrustedSystemCertificatesPath string `protobuf:"bytes,17,opt,name=trusted_system_certificates_path,json=trustedSystemCertificatesPath,proto3" json:"trusted_system_certificates_path,omitempty"` + VolumeMounts []*VolumeMount `protobuf:"bytes,18,rep,name=volume_mounts,json=volumeMounts,proto3" json:"volume_mounts,omitempty"` + Network *Network `protobuf:"bytes,19,opt,name=network,proto3" json:"network,omitempty"` + PlacementTags []string `protobuf:"bytes,20,rep,name=placement_tags,json=placementTags,proto3" json:"placement_tags,omitempty"` + MaxPids int32 `protobuf:"varint,21,opt,name=max_pids,json=maxPids,proto3" json:"max_pids"` + CertificateProperties *CertificateProperties `protobuf:"bytes,22,opt,name=certificate_properties,json=certificateProperties,proto3" json:"certificate_properties,omitempty"` + ImageUsername string `protobuf:"bytes,23,opt,name=image_username,json=imageUsername,proto3" json:"image_username"` + ImagePassword string `protobuf:"bytes,24,opt,name=image_password,json=imagePassword,proto3" json:"image_password"` + ImageLayers []*ImageLayer `protobuf:"bytes,25,rep,name=image_layers,json=imageLayers,proto3" json:"image_layers,omitempty"` + LogRateLimit *LogRateLimit `protobuf:"bytes,26,opt,name=log_rate_limit,json=logRateLimit,proto3" json:"log_rate_limit,omitempty"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,27,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + VolumeMountedFiles []*File `protobuf:"bytes,28,rep,name=volume_mounted_files,json=volumeMountedFiles,proto3" json:"volume_mounted_files"` +} + +func (m *TaskDefinition) Reset() { *m = TaskDefinition{} } +func (*TaskDefinition) ProtoMessage() {} +func (*TaskDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_ce5d8dd45b4a91ff, []int{0} +} +func (m *TaskDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskDefinition) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskDefinition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskDefinition.Merge(m, src) +} +func (m *TaskDefinition) XXX_Size() int { + return m.Size() +} +func (m *TaskDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_TaskDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskDefinition proto.InternalMessageInfo + +func (m *TaskDefinition) GetRootFs() string { + if m != nil { + return m.RootFs + } + return "" +} + +func (m *TaskDefinition) GetEnvironmentVariables() []*EnvironmentVariable { + if m != nil { + return m.EnvironmentVariables + } + return nil +} + +func (m *TaskDefinition) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *TaskDefinition) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *TaskDefinition) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *TaskDefinition) GetCpuWeight() uint32 { + if m != nil { + return m.CpuWeight + } + return 0 +} + +func (m *TaskDefinition) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *TaskDefinition) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *TaskDefinition) GetLogGuid() string { + if m != nil { + return m.LogGuid + } + return "" +} + +func (m *TaskDefinition) GetMetricsGuid() string { + if m != nil { + return m.MetricsGuid + } + return "" +} + +func (m *TaskDefinition) GetResultFile() string { + if m != nil { + return m.ResultFile + } + return "" +} + +func (m *TaskDefinition) GetCompletionCallbackUrl() string { + if m != nil { + return m.CompletionCallbackUrl + } + return "" +} + +func (m *TaskDefinition) 
GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *TaskDefinition) GetEgressRules() []*SecurityGroupRule { + if m != nil { + return m.EgressRules + } + return nil +} + +func (m *TaskDefinition) GetCachedDependencies() []*CachedDependency { + if m != nil { + return m.CachedDependencies + } + return nil +} + +// Deprecated: Do not use. +func (m *TaskDefinition) GetLegacyDownloadUser() string { + if m != nil { + return m.LegacyDownloadUser + } + return "" +} + +func (m *TaskDefinition) GetTrustedSystemCertificatesPath() string { + if m != nil { + return m.TrustedSystemCertificatesPath + } + return "" +} + +func (m *TaskDefinition) GetVolumeMounts() []*VolumeMount { + if m != nil { + return m.VolumeMounts + } + return nil +} + +func (m *TaskDefinition) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *TaskDefinition) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +func (m *TaskDefinition) GetMaxPids() int32 { + if m != nil { + return m.MaxPids + } + return 0 +} + +func (m *TaskDefinition) GetCertificateProperties() *CertificateProperties { + if m != nil { + return m.CertificateProperties + } + return nil +} + +func (m *TaskDefinition) GetImageUsername() string { + if m != nil { + return m.ImageUsername + } + return "" +} + +func (m *TaskDefinition) GetImagePassword() string { + if m != nil { + return m.ImagePassword + } + return "" +} + +func (m *TaskDefinition) GetImageLayers() []*ImageLayer { + if m != nil { + return m.ImageLayers + } + return nil +} + +func (m *TaskDefinition) GetLogRateLimit() *LogRateLimit { + if m != nil { + return m.LogRateLimit + } + return nil +} + +func (m *TaskDefinition) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *TaskDefinition) GetVolumeMountedFiles() []*File { + if m != nil { + return m.VolumeMountedFiles + } + return nil +} + +type Task 
struct { + *TaskDefinition `protobuf:"bytes,1,opt,name=task_definition,json=taskDefinition,proto3,embedded=task_definition" json:""` + TaskGuid string `protobuf:"bytes,2,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at"` + UpdatedAt int64 `protobuf:"varint,5,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at"` + FirstCompletedAt int64 `protobuf:"varint,6,opt,name=first_completed_at,json=firstCompletedAt,proto3" json:"first_completed_at"` + State Task_State `protobuf:"varint,7,opt,name=state,proto3,enum=models.Task_State" json:"state"` + CellId string `protobuf:"bytes,8,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + Result string `protobuf:"bytes,9,opt,name=result,proto3" json:"result"` + Failed bool `protobuf:"varint,10,opt,name=failed,proto3" json:"failed"` + FailureReason string `protobuf:"bytes,11,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` + RejectionCount int32 `protobuf:"varint,12,opt,name=rejection_count,json=rejectionCount,proto3" json:"rejection_count"` + RejectionReason string `protobuf:"bytes,13,opt,name=rejection_reason,json=rejectionReason,proto3" json:"rejection_reason"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { + return fileDescriptor_ce5d8dd45b4a91ff, []int{1} +} +func (m *Task) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Task) XXX_Merge(src proto.Message) { + xxx_messageInfo_Task.Merge(m, src) +} +func (m *Task) XXX_Size() int { + return 
m.Size() +} +func (m *Task) XXX_DiscardUnknown() { + xxx_messageInfo_Task.DiscardUnknown(m) +} + +var xxx_messageInfo_Task proto.InternalMessageInfo + +func (m *Task) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *Task) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *Task) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Task) GetUpdatedAt() int64 { + if m != nil { + return m.UpdatedAt + } + return 0 +} + +func (m *Task) GetFirstCompletedAt() int64 { + if m != nil { + return m.FirstCompletedAt + } + return 0 +} + +func (m *Task) GetState() Task_State { + if m != nil { + return m.State + } + return Task_Invalid +} + +func (m *Task) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *Task) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +func (m *Task) GetFailed() bool { + if m != nil { + return m.Failed + } + return false +} + +func (m *Task) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +func (m *Task) GetRejectionCount() int32 { + if m != nil { + return m.RejectionCount + } + return 0 +} + +func (m *Task) GetRejectionReason() string { + if m != nil { + return m.RejectionReason + } + return "" +} + +func init() { + proto.RegisterEnum("models.Task_State", Task_State_name, Task_State_value) + proto.RegisterType((*TaskDefinition)(nil), "models.TaskDefinition") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.TaskDefinition.MetricTagsEntry") + proto.RegisterType((*Task)(nil), "models.Task") +} + +func init() { proto.RegisterFile("task.proto", fileDescriptor_ce5d8dd45b4a91ff) } + +var fileDescriptor_ce5d8dd45b4a91ff = []byte{ + // 1386 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x56, 0xdd, 0x6e, 0xdb, 0x36, + 0x14, 0x8e, 0x92, 0xda, 0x89, 0xe9, 0x9f, 0x38, 0xcc, 
0x4f, 0xd5, 0xb4, 0xb5, 0x8c, 0x6c, 0xeb, + 0xb2, 0xa2, 0x4d, 0x87, 0xb6, 0x1b, 0xba, 0xa2, 0xc0, 0x10, 0x27, 0x6d, 0x10, 0x20, 0x19, 0x02, + 0xa6, 0xe9, 0x76, 0x27, 0xd0, 0x12, 0xad, 0x70, 0x91, 0x44, 0x83, 0xa4, 0x9c, 0xfa, 0xae, 0x8f, + 0xb0, 0xc7, 0xd8, 0xa3, 0xec, 0x32, 0x97, 0xbd, 0x12, 0xd6, 0xf4, 0x66, 0xf0, 0x55, 0x1f, 0x61, + 0xe0, 0x91, 0x64, 0x3b, 0x69, 0xae, 0x74, 0xce, 0xf7, 0x7d, 0x87, 0x34, 0x0f, 0x79, 0xce, 0x31, + 0x42, 0x9a, 0xaa, 0xb3, 0xad, 0xbe, 0x14, 0x5a, 0xe0, 0x72, 0x24, 0x7c, 0x16, 0xaa, 0xf5, 0xc7, + 0x01, 0xd7, 0xa7, 0x49, 0x77, 0xcb, 0x13, 0xd1, 0x93, 0x40, 0x04, 0xe2, 0x09, 0xd0, 0xdd, 0xa4, + 0x07, 0x1e, 0x38, 0x60, 0x65, 0x61, 0xeb, 0x75, 0xea, 0x69, 0x2e, 0x62, 0x95, 0xbb, 0x77, 0x59, + 0x3c, 0xe0, 0x52, 0xc4, 0x11, 0x8b, 0xb5, 0x3b, 0xa0, 0x92, 0xd3, 0x6e, 0xc8, 0x0a, 0x72, 0x45, + 0x31, 0x2f, 0x91, 0x5c, 0x0f, 0xdd, 0x40, 0x8a, 0xa4, 0x9f, 0xa3, 0xb7, 0x3d, 0xea, 0x9d, 0x32, + 0xdf, 0xf5, 0x59, 0x9f, 0xc5, 0x3e, 0x8b, 0xbd, 0x61, 0x4e, 0xe0, 0x81, 0x08, 0x93, 0x88, 0xb9, + 0x91, 0x48, 0x62, 0x5d, 0x6c, 0x17, 0x33, 0x7d, 0x2e, 0x64, 0xfe, 0xa3, 0xd7, 0xef, 0x79, 0x4c, + 0x6a, 0xde, 0xe3, 0x1e, 0xd5, 0xcc, 0xed, 0x4b, 0xd1, 0x37, 0xee, 0x78, 0xbf, 0x25, 0x1e, 0xd1, + 0x80, 0xb9, 0x21, 0x1d, 0x32, 0x59, 0xfc, 0x84, 0x50, 0x04, 0xae, 0x34, 0xea, 0x90, 0x47, 0xbc, + 0x58, 0x75, 0x29, 0x62, 0x5a, 0x72, 0xcf, 0xd5, 0x34, 0x28, 0x62, 0x51, 0x8f, 0x87, 0x2c, 0xb3, + 0x37, 0x3e, 0xd4, 0x51, 0xe3, 0x2d, 0x55, 0x67, 0xbb, 0xac, 0xc7, 0x63, 0x6e, 0x8e, 0x8b, 0xbf, + 0x41, 0xf3, 0x52, 0x08, 0xed, 0xf6, 0x94, 0x6d, 0xb5, 0xad, 0xcd, 0x4a, 0x07, 0x8d, 0x52, 0xa7, + 0x6c, 0xa0, 0x9e, 0x22, 0xf0, 0x7d, 0xa3, 0xb0, 0x87, 0x56, 0x6f, 0x4c, 0x87, 0x3d, 0xdb, 0x9e, + 0xdb, 0xac, 0x3e, 0xbd, 0xbb, 0x95, 0xa5, 0x7c, 0xeb, 0xf5, 0x44, 0xf4, 0x2e, 0xd7, 0x74, 0x96, + 0x46, 0xa9, 0x53, 0x67, 0xf1, 0xe0, 0x91, 0x88, 0xb8, 0x66, 0x51, 0x5f, 0x0f, 0xc9, 0x0a, 0xfb, + 0x5a, 0xa7, 0xf0, 0x03, 0x54, 0xce, 0xae, 0xc0, 0x9e, 0x6b, 0x5b, 0x9b, 0xd5, 0xa7, 0x8d, 
0x62, + 0xd5, 0x6d, 0x40, 0x49, 0xce, 0xe2, 0x6f, 0xd1, 0xbc, 0xcf, 0xd5, 0x99, 0x1b, 0x75, 0xed, 0x5b, + 0x6d, 0x6b, 0xb3, 0xd4, 0xa9, 0x8e, 0x52, 0xa7, 0x80, 0x48, 0xd9, 0x18, 0x87, 0x5d, 0xfc, 0x10, + 0x55, 0x22, 0x16, 0x09, 0x39, 0x34, 0xba, 0x12, 0xe8, 0xea, 0xa3, 0xd4, 0x99, 0x80, 0x64, 0x21, + 0x33, 0x0f, 0xbb, 0xf8, 0x31, 0x42, 0x5e, 0x3f, 0x71, 0xcf, 0x19, 0x0f, 0x4e, 0xb5, 0x5d, 0x6e, + 0x5b, 0x9b, 0xf5, 0x4e, 0x63, 0x94, 0x3a, 0x53, 0x28, 0xa9, 0x78, 0xfd, 0xe4, 0x77, 0x30, 0xf1, + 0x16, 0x42, 0x7d, 0xc9, 0x07, 0x3c, 0x64, 0x01, 0xf3, 0xed, 0xf9, 0xb6, 0xb5, 0xb9, 0x90, 0xc9, + 0x27, 0x28, 0x99, 0xb2, 0xcd, 0xf2, 0xe6, 0xb2, 0x94, 0x48, 0xa4, 0xc7, 0xec, 0x05, 0xc8, 0x32, + 0xe8, 0x27, 0x28, 0xa9, 0x84, 0x22, 0x38, 0x06, 0x13, 0x7f, 0x8f, 0x16, 0x0c, 0x11, 0x24, 0xdc, + 0xb7, 0x2b, 0x20, 0xae, 0x8d, 0x52, 0x67, 0x8c, 0x91, 0xf9, 0x50, 0x04, 0x7b, 0x09, 0xf7, 0xf1, + 0x33, 0x54, 0xcb, 0xae, 0x5b, 0x65, 0x62, 0x04, 0xe2, 0xe6, 0x28, 0x75, 0xae, 0xe0, 0xa4, 0x9a, + 0x7b, 0x10, 0xf4, 0x23, 0xaa, 0x4a, 0xa6, 0x92, 0x50, 0xbb, 0xe6, 0x5d, 0xd8, 0x55, 0x88, 0x59, + 0x1c, 0xa5, 0xce, 0x34, 0x4c, 0x50, 0xe6, 0xbc, 0xe1, 0x21, 0xc3, 0x3f, 0xa3, 0xdb, 0x9e, 0x88, + 0xfa, 0x21, 0x33, 0xd9, 0x77, 0x3d, 0x1a, 0x86, 0x5d, 0xea, 0x9d, 0xb9, 0x89, 0x0c, 0xed, 0x9a, + 0x89, 0x26, 0xab, 0x13, 0x7a, 0x27, 0x67, 0x4f, 0x64, 0x88, 0x5b, 0x08, 0xd1, 0x38, 0x16, 0x9a, + 0xc2, 0x9d, 0xd6, 0x41, 0x3a, 0x85, 0xe0, 0x57, 0xa8, 0xc6, 0x02, 0xc9, 0x94, 0x72, 0x65, 0x62, + 0xde, 0x52, 0x03, 0xde, 0xd2, 0x9d, 0xe2, 0xd6, 0x8f, 0xf3, 0x12, 0xdb, 0x33, 0x15, 0x46, 0x92, + 0x90, 0x91, 0x6a, 0x26, 0x37, 0xb6, 0xc2, 0xfb, 0x68, 0xf9, 0x7a, 0xb9, 0x71, 0xa6, 0xec, 0x45, + 0x58, 0xc4, 0x2e, 0x16, 0xd9, 0x01, 0xc9, 0xee, 0xb8, 0x20, 0x09, 0xf6, 0xae, 0x22, 0x9c, 0x29, + 0xfc, 0x1c, 0xad, 0x84, 0x2c, 0xa0, 0xde, 0xd0, 0xf5, 0xc5, 0x79, 0x1c, 0x0a, 0xea, 0xbb, 0x89, + 0x62, 0xd2, 0x6e, 0x42, 0x6e, 0x66, 0x6d, 0x8b, 0xe0, 0x8c, 0xdf, 0xcd, 0xe9, 0x13, 0xc5, 0x24, + 0xde, 0x43, 0x6d, 0x2d, 0x13, 
0xa5, 0x99, 0xef, 0xaa, 0xa1, 0xd2, 0x2c, 0x72, 0xa7, 0x4a, 0x58, + 0xb9, 0x7d, 0xaa, 0x4f, 0xed, 0x25, 0x38, 0xf4, 0xfd, 0x5c, 0x77, 0x0c, 0xb2, 0x9d, 0x29, 0xd5, + 0x11, 0xd5, 0xa7, 0xf8, 0x05, 0xaa, 0x4f, 0xf7, 0x07, 0x65, 0x63, 0x38, 0xc3, 0x72, 0x71, 0x86, + 0x77, 0x40, 0x1e, 0x1a, 0x8e, 0xd4, 0x06, 0x13, 0x47, 0xe1, 0x1f, 0xd0, 0x7c, 0xde, 0x45, 0xec, + 0x65, 0x28, 0x99, 0xc5, 0x22, 0xe6, 0xb7, 0x0c, 0x26, 0x05, 0x8f, 0xbf, 0x43, 0x8d, 0x7e, 0x48, + 0x3d, 0x06, 0xf5, 0x6b, 0xba, 0x83, 0xbd, 0xd2, 0x9e, 0xdb, 0xac, 0x90, 0xfa, 0x18, 0x7d, 0x4b, + 0x03, 0x65, 0xde, 0x5e, 0x44, 0xdf, 0xbb, 0x7d, 0xee, 0x2b, 0x7b, 0x15, 0x8a, 0x06, 0xde, 0x5e, + 0x81, 0x91, 0xf9, 0x88, 0xbe, 0x3f, 0xe2, 0xbe, 0xc2, 0x6f, 0xd1, 0xda, 0xcd, 0x1d, 0xcb, 0x5e, + 0x83, 0x5f, 0x72, 0x7f, 0x7c, 0x03, 0x13, 0xd5, 0xd1, 0x58, 0x44, 0x56, 0xbd, 0x9b, 0x60, 0xfc, + 0x0b, 0x6a, 0x64, 0x9d, 0xce, 0xe4, 0x3f, 0xa6, 0x11, 0xb3, 0x6f, 0xc3, 0x1d, 0xe0, 0x51, 0xea, + 0x5c, 0x63, 0x48, 0x1d, 0xfc, 0x93, 0xdc, 0x9d, 0x84, 0xf6, 0xa9, 0x52, 0xe7, 0x42, 0xfa, 0xb6, + 0x7d, 0x3d, 0xb4, 0x60, 0xf2, 0xd0, 0xa3, 0xdc, 0xc5, 0x3f, 0xa1, 0xda, 0x54, 0x7f, 0x55, 0xf6, + 0x1d, 0xc8, 0x3f, 0x2e, 0x4e, 0xb0, 0x6f, 0xb8, 0x03, 0x43, 0x91, 0x2a, 0x1f, 0xdb, 0x0a, 0xbf, + 0x44, 0x8d, 0xab, 0x3d, 0xd8, 0x5e, 0x87, 0xa3, 0xaf, 0x14, 0x81, 0x07, 0x22, 0x20, 0x54, 0xb3, + 0x03, 0xc3, 0x91, 0x5a, 0x38, 0xe5, 0xe1, 0x3d, 0x54, 0x9d, 0xea, 0xd4, 0xf6, 0x5d, 0xd8, 0xf1, + 0x41, 0x11, 0x78, 0xb5, 0x45, 0x6f, 0x1d, 0x82, 0xd2, 0xdc, 0xcf, 0xeb, 0x58, 0xcb, 0x21, 0x41, + 0xd1, 0x18, 0xc0, 0x7f, 0xa0, 0x95, 0xe9, 0xc7, 0xc3, 0x7c, 0xa8, 0x5f, 0x65, 0xdf, 0x83, 0x15, + 0x6b, 0xc5, 0x8a, 0xa6, 0x90, 0x3b, 0xf6, 0x28, 0x75, 0x6e, 0x54, 0x13, 0x3c, 0xf5, 0xac, 0x98, + 0x6f, 0xc4, 0x6a, 0xfd, 0x04, 0x2d, 0x5e, 0xdb, 0x18, 0x37, 0xd1, 0xdc, 0x19, 0x1b, 0x66, 0x73, + 0x82, 0x18, 0x13, 0x3f, 0x42, 0xa5, 0x01, 0x0d, 0x13, 0x66, 0xcf, 0xc2, 0xd1, 0xd7, 0x8a, 0xfd, + 0xc6, 0x91, 0xef, 0x0c, 0x4b, 0x32, 0xd1, 0xcb, 0xd9, 0x17, 0xd6, 
0xc6, 0x97, 0x12, 0xba, 0x65, + 0xce, 0x87, 0xf7, 0xd1, 0xa2, 0x19, 0xda, 0xae, 0x3f, 0x3e, 0x28, 0x2c, 0x3c, 0xb5, 0xc8, 0xd5, + 0x34, 0x74, 0x16, 0x2e, 0x52, 0xc7, 0x1a, 0xa5, 0xce, 0x0c, 0x69, 0xe8, 0xab, 0x33, 0xec, 0x21, + 0xaa, 0xc0, 0x52, 0xd0, 0x05, 0x67, 0xe1, 0xda, 0xa1, 0xd7, 0x8f, 0x41, 0xb2, 0x60, 0x4c, 0xe8, + 0x7f, 0x1b, 0xa8, 0xec, 0x8b, 0x88, 0xf2, 0x6c, 0xca, 0xe4, 0xe3, 0x2e, 0x43, 0x48, 0xfe, 0x85, + 0x79, 0x20, 0x19, 0x35, 0xf9, 0xa1, 0x1a, 0x86, 0xcc, 0x5c, 0x3e, 0x0f, 0xc6, 0x28, 0xa9, 0xe4, + 0xf6, 0xb6, 0x36, 0xf2, 0xa4, 0xef, 0x17, 0xf2, 0xd2, 0x44, 0x3e, 0x41, 0x49, 0x25, 0xb7, 0xb7, + 0x35, 0xde, 0x45, 0xb8, 0xc7, 0xa5, 0xd2, 0x6e, 0xde, 0x36, 0xb3, 0xb0, 0x32, 0x84, 0xad, 0x8d, + 0x52, 0xe7, 0x06, 0x96, 0x34, 0x01, 0xdb, 0x29, 0xa0, 0x6d, 0x8d, 0x9f, 0xa1, 0x92, 0xd2, 0x54, + 0x33, 0x98, 0x3f, 0x8d, 0xc9, 0x6b, 0x35, 0x49, 0xdb, 0x3a, 0x36, 0x4c, 0xa7, 0x32, 0x4a, 0x9d, + 0x4c, 0x44, 0xb2, 0x8f, 0x19, 0x9d, 0x1e, 0x0b, 0x43, 0x97, 0xfb, 0xf9, 0x18, 0x82, 0xd1, 0x99, + 0x43, 0xa4, 0x6c, 0x8c, 0x7d, 0x48, 0x51, 0xd6, 0xfe, 0xf3, 0xf1, 0x93, 0xfd, 0x23, 0x00, 0x84, + 0xe4, 0x5f, 0xa3, 0xe9, 0x51, 0x1e, 0xb2, 0x6c, 0xea, 0x2c, 0x64, 0x9a, 0x0c, 0x21, 0xf9, 0xd7, + 0x94, 0xa4, 0xb1, 0x12, 0xc9, 0x5c, 0xc9, 0xa8, 0x12, 0x71, 0x3e, 0x6d, 0xa0, 0x24, 0xaf, 0x32, + 0xa4, 0x9e, 0xfb, 0x04, 0x5c, 0xfc, 0x0a, 0x2d, 0x4a, 0xf6, 0x27, 0xf3, 0xb2, 0x91, 0x63, 0x9e, + 0x25, 0xcc, 0x9a, 0x52, 0x67, 0x79, 0x94, 0x3a, 0xd7, 0x29, 0xd2, 0x18, 0x03, 0x3b, 0xc6, 0xc7, + 0xbf, 0xa2, 0xe6, 0x44, 0x92, 0x6f, 0x0d, 0xf3, 0xa7, 0xb3, 0x32, 0x4a, 0x9d, 0xaf, 0x38, 0x32, + 0x59, 0x30, 0xdb, 0x7e, 0xe3, 0x00, 0x95, 0x20, 0x85, 0xb8, 0x8a, 0xe6, 0xf7, 0xe3, 0x01, 0x0d, + 0xb9, 0xdf, 0x9c, 0x31, 0xce, 0x11, 0x8b, 0x7d, 0x1e, 0x07, 0x4d, 0xcb, 0x38, 0x24, 0x89, 0x63, + 0xe3, 0xcc, 0xe2, 0x3a, 0xaa, 0x8c, 0xef, 0xa6, 0x39, 0x67, 0x5c, 0xc2, 0x94, 0x08, 0x07, 0x86, + 0xbd, 0xd5, 0x79, 0x7e, 0xf1, 0xa9, 0x65, 0x7d, 0xfc, 0xd4, 0x9a, 0xf9, 0xf2, 0xa9, 0x65, 0x7d, + 0xb8, 
0x6c, 0x59, 0x7f, 0x5f, 0xb6, 0xac, 0x7f, 0x2e, 0x5b, 0xd6, 0xc5, 0x65, 0xcb, 0xfa, 0xf7, + 0xb2, 0x65, 0xfd, 0x77, 0xd9, 0x9a, 0xf9, 0x72, 0xd9, 0xb2, 0xfe, 0xfa, 0xdc, 0x9a, 0xb9, 0xf8, + 0xdc, 0x9a, 0xf9, 0xf8, 0xb9, 0x35, 0xd3, 0x2d, 0xc3, 0x5f, 0xb6, 0x67, 0xff, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x19, 0x37, 0xa2, 0x26, 0xdb, 0x0a, 0x00, 0x00, +} + +func (x Task_State) String() string { + s, ok := Task_State_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *TaskDefinition) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskDefinition) + if !ok { + that2, ok := that.(TaskDefinition) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.RootFs != that1.RootFs { + return false + } + if len(this.EnvironmentVariables) != len(that1.EnvironmentVariables) { + return false + } + for i := range this.EnvironmentVariables { + if !this.EnvironmentVariables[i].Equal(that1.EnvironmentVariables[i]) { + return false + } + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.CpuWeight != that1.CpuWeight { + return false + } + if this.Privileged != that1.Privileged { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.LogGuid != that1.LogGuid { + return false + } + if this.MetricsGuid != that1.MetricsGuid { + return false + } + if this.ResultFile != that1.ResultFile { + return false + } + if this.CompletionCallbackUrl != that1.CompletionCallbackUrl { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if len(this.EgressRules) != len(that1.EgressRules) { + return false + } + for i := range this.EgressRules { + if !this.EgressRules[i].Equal(that1.EgressRules[i]) { + return false + } + } + if 
len(this.CachedDependencies) != len(that1.CachedDependencies) { + return false + } + for i := range this.CachedDependencies { + if !this.CachedDependencies[i].Equal(that1.CachedDependencies[i]) { + return false + } + } + if this.LegacyDownloadUser != that1.LegacyDownloadUser { + return false + } + if this.TrustedSystemCertificatesPath != that1.TrustedSystemCertificatesPath { + return false + } + if len(this.VolumeMounts) != len(that1.VolumeMounts) { + return false + } + for i := range this.VolumeMounts { + if !this.VolumeMounts[i].Equal(that1.VolumeMounts[i]) { + return false + } + } + if !this.Network.Equal(that1.Network) { + return false + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + if this.MaxPids != that1.MaxPids { + return false + } + if !this.CertificateProperties.Equal(that1.CertificateProperties) { + return false + } + if this.ImageUsername != that1.ImageUsername { + return false + } + if this.ImagePassword != that1.ImagePassword { + return false + } + if len(this.ImageLayers) != len(that1.ImageLayers) { + return false + } + for i := range this.ImageLayers { + if !this.ImageLayers[i].Equal(that1.ImageLayers[i]) { + return false + } + } + if !this.LogRateLimit.Equal(that1.LogRateLimit) { + return false + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + if len(this.VolumeMountedFiles) != len(that1.VolumeMountedFiles) { + return false + } + for i := range this.VolumeMountedFiles { + if !this.VolumeMountedFiles[i].Equal(that1.VolumeMountedFiles[i]) { + return false + } + } + return true +} +func (this *Task) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Task) + if !ok { + that2, ok := that.(Task) + if ok { + that1 = 
&that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskDefinition.Equal(that1.TaskDefinition) { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CreatedAt != that1.CreatedAt { + return false + } + if this.UpdatedAt != that1.UpdatedAt { + return false + } + if this.FirstCompletedAt != that1.FirstCompletedAt { + return false + } + if this.State != that1.State { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.Result != that1.Result { + return false + } + if this.Failed != that1.Failed { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + if this.RejectionCount != that1.RejectionCount { + return false + } + if this.RejectionReason != that1.RejectionReason { + return false + } + return true +} +func (this *TaskDefinition) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 32) + s = append(s, "&models.TaskDefinition{") + s = append(s, "RootFs: "+fmt.Sprintf("%#v", this.RootFs)+",\n") + if this.EnvironmentVariables != nil { + s = append(s, "EnvironmentVariables: "+fmt.Sprintf("%#v", this.EnvironmentVariables)+",\n") + } + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "CpuWeight: "+fmt.Sprintf("%#v", this.CpuWeight)+",\n") + s = append(s, "Privileged: "+fmt.Sprintf("%#v", this.Privileged)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "LogGuid: "+fmt.Sprintf("%#v", this.LogGuid)+",\n") + s = append(s, "MetricsGuid: "+fmt.Sprintf("%#v", this.MetricsGuid)+",\n") + s = append(s, "ResultFile: "+fmt.Sprintf("%#v", this.ResultFile)+",\n") + s = append(s, 
"CompletionCallbackUrl: "+fmt.Sprintf("%#v", this.CompletionCallbackUrl)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + if this.EgressRules != nil { + s = append(s, "EgressRules: "+fmt.Sprintf("%#v", this.EgressRules)+",\n") + } + if this.CachedDependencies != nil { + s = append(s, "CachedDependencies: "+fmt.Sprintf("%#v", this.CachedDependencies)+",\n") + } + s = append(s, "LegacyDownloadUser: "+fmt.Sprintf("%#v", this.LegacyDownloadUser)+",\n") + s = append(s, "TrustedSystemCertificatesPath: "+fmt.Sprintf("%#v", this.TrustedSystemCertificatesPath)+",\n") + if this.VolumeMounts != nil { + s = append(s, "VolumeMounts: "+fmt.Sprintf("%#v", this.VolumeMounts)+",\n") + } + if this.Network != nil { + s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "MaxPids: "+fmt.Sprintf("%#v", this.MaxPids)+",\n") + if this.CertificateProperties != nil { + s = append(s, "CertificateProperties: "+fmt.Sprintf("%#v", this.CertificateProperties)+",\n") + } + s = append(s, "ImageUsername: "+fmt.Sprintf("%#v", this.ImageUsername)+",\n") + s = append(s, "ImagePassword: "+fmt.Sprintf("%#v", this.ImagePassword)+",\n") + if this.ImageLayers != nil { + s = append(s, "ImageLayers: "+fmt.Sprintf("%#v", this.ImageLayers)+",\n") + } + if this.LogRateLimit != nil { + s = append(s, "LogRateLimit: "+fmt.Sprintf("%#v", this.LogRateLimit)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: 
"+mapStringForMetricTags+",\n") + } + if this.VolumeMountedFiles != nil { + s = append(s, "VolumeMountedFiles: "+fmt.Sprintf("%#v", this.VolumeMountedFiles)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Task) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 17) + s = append(s, "&models.Task{") + if this.TaskDefinition != nil { + s = append(s, "TaskDefinition: "+fmt.Sprintf("%#v", this.TaskDefinition)+",\n") + } + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n") + s = append(s, "UpdatedAt: "+fmt.Sprintf("%#v", this.UpdatedAt)+",\n") + s = append(s, "FirstCompletedAt: "+fmt.Sprintf("%#v", this.FirstCompletedAt)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s = append(s, "Failed: "+fmt.Sprintf("%#v", this.Failed)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "RejectionCount: "+fmt.Sprintf("%#v", this.RejectionCount)+",\n") + s = append(s, "RejectionReason: "+fmt.Sprintf("%#v", this.RejectionReason)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TaskDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeMountedFiles) > 0 { + for iNdEx := len(m.VolumeMountedFiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMountedFiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTask(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTask(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + } + if m.LogRateLimit != nil { + { + size, err := m.LogRateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.ImageLayers) > 0 { + for iNdEx := len(m.ImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.ImagePassword) > 0 { + i -= len(m.ImagePassword) + copy(dAtA[i:], m.ImagePassword) + i = encodeVarintTask(dAtA, i, uint64(len(m.ImagePassword))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.ImageUsername) > 0 { + i -= len(m.ImageUsername) + copy(dAtA[i:], m.ImageUsername) + i = encodeVarintTask(dAtA, i, uint64(len(m.ImageUsername))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.CertificateProperties != nil 
{ + { + size, err := m.CertificateProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.MaxPids != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.MaxPids)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintTask(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if len(m.TrustedSystemCertificatesPath) > 0 { + i -= len(m.TrustedSystemCertificatesPath) + copy(dAtA[i:], m.TrustedSystemCertificatesPath) + i = encodeVarintTask(dAtA, i, uint64(len(m.TrustedSystemCertificatesPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.LegacyDownloadUser) > 0 { + i -= len(m.LegacyDownloadUser) + copy(dAtA[i:], m.LegacyDownloadUser) + i = encodeVarintTask(dAtA, i, uint64(len(m.LegacyDownloadUser))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.CachedDependencies) > 0 { + for iNdEx := len(m.CachedDependencies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CachedDependencies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } 
+ if len(m.EgressRules) > 0 { + for iNdEx := len(m.EgressRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EgressRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintTask(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x6a + } + if len(m.CompletionCallbackUrl) > 0 { + i -= len(m.CompletionCallbackUrl) + copy(dAtA[i:], m.CompletionCallbackUrl) + i = encodeVarintTask(dAtA, i, uint64(len(m.CompletionCallbackUrl))) + i-- + dAtA[i] = 0x62 + } + if len(m.ResultFile) > 0 { + i -= len(m.ResultFile) + copy(dAtA[i:], m.ResultFile) + i = encodeVarintTask(dAtA, i, uint64(len(m.ResultFile))) + i-- + dAtA[i] = 0x5a + } + if len(m.MetricsGuid) > 0 { + i -= len(m.MetricsGuid) + copy(dAtA[i:], m.MetricsGuid) + i = encodeVarintTask(dAtA, i, uint64(len(m.MetricsGuid))) + i-- + dAtA[i] = 0x52 + } + if len(m.LogGuid) > 0 { + i -= len(m.LogGuid) + copy(dAtA[i:], m.LogGuid) + i = encodeVarintTask(dAtA, i, uint64(len(m.LogGuid))) + i-- + dAtA[i] = 0x4a + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintTask(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x42 + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.CpuWeight != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.CpuWeight)) + i-- + dAtA[i] = 0x30 + } + if m.MemoryMb != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x28 + } + if m.DiskMb != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x20 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if 
len(m.EnvironmentVariables) > 0 { + for iNdEx := len(m.EnvironmentVariables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvironmentVariables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RootFs) > 0 { + i -= len(m.RootFs) + copy(dAtA[i:], m.RootFs) + i = encodeVarintTask(dAtA, i, uint64(len(m.RootFs))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Task) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RejectionReason) > 0 { + i -= len(m.RejectionReason) + copy(dAtA[i:], m.RejectionReason) + i = encodeVarintTask(dAtA, i, uint64(len(m.RejectionReason))) + i-- + dAtA[i] = 0x6a + } + if m.RejectionCount != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.RejectionCount)) + i-- + dAtA[i] = 0x60 + } + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTask(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x5a + } + if m.Failed { + i-- + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if len(m.Result) > 0 { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintTask(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x4a + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTask(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x42 + } + if m.State != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x38 + } + if m.FirstCompletedAt != 0 { + i = 
encodeVarintTask(dAtA, i, uint64(m.FirstCompletedAt)) + i-- + dAtA[i] = 0x30 + } + if m.UpdatedAt != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.UpdatedAt)) + i-- + dAtA[i] = 0x28 + } + if m.CreatedAt != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTask(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x1a + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTask(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0x12 + } + if m.TaskDefinition != nil { + { + size, err := m.TaskDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTask(dAtA []byte, offset int, v uint64) int { + offset -= sovTask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TaskDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RootFs) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if len(m.EnvironmentVariables) > 0 { + for _, e := range m.EnvironmentVariables { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovTask(uint64(l)) + } + if m.DiskMb != 0 { + n += 1 + sovTask(uint64(m.DiskMb)) + } + if m.MemoryMb != 0 { + n += 1 + sovTask(uint64(m.MemoryMb)) + } + if m.CpuWeight != 0 { + n += 1 + sovTask(uint64(m.CpuWeight)) + } + if m.Privileged { + n += 2 + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.LogGuid) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.MetricsGuid) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.ResultFile) + if l > 0 { + n += 1 + l + 
sovTask(uint64(l)) + } + l = len(m.CompletionCallbackUrl) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Annotation) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if len(m.EgressRules) > 0 { + for _, e := range m.EgressRules { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + if len(m.CachedDependencies) > 0 { + for _, e := range m.CachedDependencies { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + l = len(m.LegacyDownloadUser) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + l = len(m.TrustedSystemCertificatesPath) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + sovTask(uint64(l)) + } + } + if m.Network != nil { + l = m.Network.Size() + n += 2 + l + sovTask(uint64(l)) + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 2 + l + sovTask(uint64(l)) + } + } + if m.MaxPids != 0 { + n += 2 + sovTask(uint64(m.MaxPids)) + } + if m.CertificateProperties != nil { + l = m.CertificateProperties.Size() + n += 2 + l + sovTask(uint64(l)) + } + l = len(m.ImageUsername) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + l = len(m.ImagePassword) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + if len(m.ImageLayers) > 0 { + for _, e := range m.ImageLayers { + l = e.Size() + n += 2 + l + sovTask(uint64(l)) + } + } + if m.LogRateLimit != nil { + l = m.LogRateLimit.Size() + n += 2 + l + sovTask(uint64(l)) + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovTask(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovTask(uint64(len(k))) + l + n += mapEntrySize + 2 + sovTask(uint64(mapEntrySize)) + } + } + if len(m.VolumeMountedFiles) > 0 { + for _, e := range m.VolumeMountedFiles { + l = e.Size() + n += 2 + l + sovTask(uint64(l)) + } + } + return n +} + +func (m *Task) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + if m.TaskDefinition != nil { + l = m.TaskDefinition.Size() + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovTask(uint64(m.CreatedAt)) + } + if m.UpdatedAt != 0 { + n += 1 + sovTask(uint64(m.UpdatedAt)) + } + if m.FirstCompletedAt != 0 { + n += 1 + sovTask(uint64(m.FirstCompletedAt)) + } + if m.State != 0 { + n += 1 + sovTask(uint64(m.State)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Result) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Failed { + n += 2 + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.RejectionCount != 0 { + n += 1 + sovTask(uint64(m.RejectionCount)) + } + l = len(m.RejectionReason) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func sovTask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTask(x uint64) (n int) { + return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TaskDefinition) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironmentVariables := "[]*EnvironmentVariable{" + for _, f := range this.EnvironmentVariables { + repeatedStringForEnvironmentVariables += strings.Replace(fmt.Sprintf("%v", f), "EnvironmentVariable", "EnvironmentVariable", 1) + "," + } + repeatedStringForEnvironmentVariables += "}" + repeatedStringForEgressRules := "[]*SecurityGroupRule{" + for _, f := range this.EgressRules { + repeatedStringForEgressRules += strings.Replace(fmt.Sprintf("%v", f), "SecurityGroupRule", "SecurityGroupRule", 1) + "," + } + repeatedStringForEgressRules += "}" + repeatedStringForCachedDependencies := "[]*CachedDependency{" + for _, f := range this.CachedDependencies { + repeatedStringForCachedDependencies += strings.Replace(fmt.Sprintf("%v", f), "CachedDependency", 
"CachedDependency", 1) + "," + } + repeatedStringForCachedDependencies += "}" + repeatedStringForVolumeMounts := "[]*VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(fmt.Sprintf("%v", f), "VolumeMount", "VolumeMount", 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForImageLayers := "[]*ImageLayer{" + for _, f := range this.ImageLayers { + repeatedStringForImageLayers += strings.Replace(fmt.Sprintf("%v", f), "ImageLayer", "ImageLayer", 1) + "," + } + repeatedStringForImageLayers += "}" + repeatedStringForVolumeMountedFiles := "[]*File{" + for _, f := range this.VolumeMountedFiles { + repeatedStringForVolumeMountedFiles += strings.Replace(fmt.Sprintf("%v", f), "File", "File", 1) + "," + } + repeatedStringForVolumeMountedFiles += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&TaskDefinition{`, + `RootFs:` + fmt.Sprintf("%v", this.RootFs) + `,`, + `EnvironmentVariables:` + repeatedStringForEnvironmentVariables + `,`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `CpuWeight:` + fmt.Sprintf("%v", this.CpuWeight) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `LogGuid:` + fmt.Sprintf("%v", this.LogGuid) + `,`, + `MetricsGuid:` + fmt.Sprintf("%v", this.MetricsGuid) + `,`, + `ResultFile:` + fmt.Sprintf("%v", this.ResultFile) + `,`, + `CompletionCallbackUrl:` + 
fmt.Sprintf("%v", this.CompletionCallbackUrl) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `EgressRules:` + repeatedStringForEgressRules + `,`, + `CachedDependencies:` + repeatedStringForCachedDependencies + `,`, + `LegacyDownloadUser:` + fmt.Sprintf("%v", this.LegacyDownloadUser) + `,`, + `TrustedSystemCertificatesPath:` + fmt.Sprintf("%v", this.TrustedSystemCertificatesPath) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `MaxPids:` + fmt.Sprintf("%v", this.MaxPids) + `,`, + `CertificateProperties:` + strings.Replace(fmt.Sprintf("%v", this.CertificateProperties), "CertificateProperties", "CertificateProperties", 1) + `,`, + `ImageUsername:` + fmt.Sprintf("%v", this.ImageUsername) + `,`, + `ImagePassword:` + fmt.Sprintf("%v", this.ImagePassword) + `,`, + `ImageLayers:` + repeatedStringForImageLayers + `,`, + `LogRateLimit:` + strings.Replace(fmt.Sprintf("%v", this.LogRateLimit), "LogRateLimit", "LogRateLimit", 1) + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `VolumeMountedFiles:` + repeatedStringForVolumeMountedFiles + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Task{`, + `TaskDefinition:` + strings.Replace(this.TaskDefinition.String(), "TaskDefinition", "TaskDefinition", 1) + `,`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `UpdatedAt:` + fmt.Sprintf("%v", this.UpdatedAt) + `,`, + `FirstCompletedAt:` + fmt.Sprintf("%v", this.FirstCompletedAt) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `Failed:` + fmt.Sprintf("%v", 
this.Failed) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `RejectionCount:` + fmt.Sprintf("%v", this.RejectionCount) + `,`, + `RejectionReason:` + fmt.Sprintf("%v", this.RejectionReason) + `,`, + `}`, + }, "") + return s +} +func valueToStringTask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TaskDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootFs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootFs = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvironmentVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvironmentVariables = append(m.EnvironmentVariables, &EnvironmentVariable{}) + if err := m.EnvironmentVariables[len(m.EnvironmentVariables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuWeight", wireType) + } + m.CpuWeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuWeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionCallbackUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompletionCallbackUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressRules = append(m.EgressRules, &SecurityGroupRule{}) + if err := m.EgressRules[len(m.EgressRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachedDependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CachedDependencies = append(m.CachedDependencies, &CachedDependency{}) + if err := m.CachedDependencies[len(m.CachedDependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyDownloadUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LegacyDownloadUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedSystemCertificatesPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustedSystemCertificatesPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPids", wireType) + } + m.MaxPids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPids |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertificateProperties == nil { + m.CertificateProperties = &CertificateProperties{} + } + if err := m.CertificateProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ImagePassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLayers = append(m.ImageLayers, &ImageLayer{}) + if err := m.ImageLayers[len(m.ImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogRateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogRateLimit == nil { + 
m.LogRateLimit = &LogRateLimit{} + } + if err := m.LogRateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTask + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTask + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthTask + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthTask + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMountedFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMountedFiles = append(m.VolumeMountedFiles, &File{}) + if err := m.VolumeMountedFiles[len(m.VolumeMountedFiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TaskDefinition == nil { + m.TaskDefinition = &TaskDefinition{} + } + if err := m.TaskDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + m.UpdatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstCompletedAt", wireType) + } + m.FirstCompletedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FirstCompletedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Task_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + case 11: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectionCount", wireType) + } + m.RejectionCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectionCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectionReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectionReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTask + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTask + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTask + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/task.proto b/vendor/code.cloudfoundry.org/bbs/models/task.proto new file mode 100644 index 00000000..697e33c7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package models; + +import 
"github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actions.proto"; +import "environment_variables.proto"; +import "security_group.proto"; +import "cached_dependency.proto"; +import "volume_mount.proto"; +import "network.proto"; +import "certificate_properties.proto"; +import "image_layer.proto"; +import "log_rate_limit.proto"; +import "metric_tags.proto"; +import "file.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message TaskDefinition { + string root_fs = 1 [(gogoproto.jsontag) = "rootfs"]; + repeated EnvironmentVariable environment_variables = 2 [(gogoproto.jsontag) = "env,omitempty"]; + Action action = 3; + int32 disk_mb = 4 [(gogoproto.jsontag) = "disk_mb"]; + int32 memory_mb = 5 [(gogoproto.jsontag) = "memory_mb"]; + uint32 cpu_weight = 6 [(gogoproto.jsontag) = "cpu_weight"]; + bool privileged = 7 [(gogoproto.jsontag) = "privileged"]; + string log_source = 8 [(gogoproto.jsontag) = "log_source"]; + string log_guid = 9 [(gogoproto.jsontag) = "log_guid"]; + string metrics_guid = 10 [(gogoproto.jsontag) = "metrics_guid"]; + string result_file = 11 [(gogoproto.jsontag) = "result_file"]; + string completion_callback_url = 12; + string annotation = 13; + repeated SecurityGroupRule egress_rules = 14; + repeated CachedDependency cached_dependencies = 15; + string legacy_download_user = 16 [deprecated=true]; + string trusted_system_certificates_path = 17; + repeated VolumeMount volume_mounts = 18; + Network network = 19; + repeated string placement_tags = 20; + int32 max_pids = 21 [(gogoproto.jsontag) = "max_pids"]; + CertificateProperties certificate_properties = 22; + string image_username = 23 [(gogoproto.jsontag) = "image_username"]; + string image_password = 24 [(gogoproto.jsontag) = "image_password"]; + repeated ImageLayer image_layers = 25; + LogRateLimit log_rate_limit = 26; + map metric_tags = 27; + repeated File volume_mounted_files = 28 [(gogoproto.jsontag) = "volume_mounted_files"]; +} + +message Task { + enum State { + Invalid = 
0; + Pending = 1; + Running = 2; + Completed = 3; + Resolving = 4; + } + + TaskDefinition task_definition = 1 [(gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + string task_guid = 2 [(gogoproto.jsontag) = "task_guid"]; + string domain = 3 [(gogoproto.jsontag) = "domain"]; + int64 created_at = 4 [(gogoproto.jsontag) = "created_at"]; + int64 updated_at = 5 [(gogoproto.jsontag) = "updated_at"]; + int64 first_completed_at = 6 [(gogoproto.jsontag) = "first_completed_at"]; + + State state = 7 [(gogoproto.jsontag) = "state"]; + + string cell_id = 8 [(gogoproto.jsontag) = "cell_id"]; + + string result = 9 [(gogoproto.jsontag) = "result"]; + bool failed = 10 [(gogoproto.jsontag) = "failed"]; + string failure_reason = 11 [(gogoproto.jsontag) = "failure_reason"]; + int32 rejection_count = 12 [(gogoproto.jsontag) = "rejection_count"]; + string rejection_reason = 13 [(gogoproto.jsontag) = "rejection_reason"]; +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/task_requests.go b/vendor/code.cloudfoundry.org/bbs/models/task_requests.go new file mode 100644 index 00000000..a3fa65aa --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task_requests.go @@ -0,0 +1,125 @@ +package models + +func (req *DesireTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if req.Domain == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if req.TaskDefinition == nil { + validationError = validationError.Append(ErrInvalidField{"task_definition"}) + } else if defErr := req.TaskDefinition.Validate(); defErr != nil { + validationError = validationError.Append(defErr) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *StartTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + 
validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *CompleteTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *FailTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.FailureReason == "" { + validationError = validationError.Append(ErrInvalidField{"failure_reason"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *RejectTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.RejectionReason == "" { + validationError = validationError.Append(ErrInvalidField{"failure_reason"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *TasksRequest) Validate() error { + return nil +} + +func (request *TaskByGuidRequest) Validate() error { + var validationError ValidationError + + if request.TaskGuid == "" { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *TaskGuidRequest) Validate() error { + var validationError ValidationError + + if request.TaskGuid == "" { + validationError = 
validationError.Append(ErrInvalidField{"task_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go b/vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go new file mode 100644 index 00000000..0e1b642c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go @@ -0,0 +1,4016 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: task_requests.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type TaskLifecycleResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *TaskLifecycleResponse) Reset() { *m = TaskLifecycleResponse{} } +func (*TaskLifecycleResponse) ProtoMessage() {} +func (*TaskLifecycleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{0} +} +func (m *TaskLifecycleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskLifecycleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskLifecycleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskLifecycleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskLifecycleResponse.Merge(m, src) +} +func (m *TaskLifecycleResponse) XXX_Size() int { + return m.Size() +} +func (m *TaskLifecycleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskLifecycleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskLifecycleResponse proto.InternalMessageInfo + +func (m *TaskLifecycleResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type DesireTaskRequest struct { + TaskDefinition *TaskDefinition `protobuf:"bytes,1,opt,name=task_definition,json=taskDefinition,proto3" json:"task_definition"` + TaskGuid string `protobuf:"bytes,2,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain"` +} + +func (m *DesireTaskRequest) Reset() { *m = DesireTaskRequest{} } +func (*DesireTaskRequest) ProtoMessage() {} +func (*DesireTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{1} +} +func (m *DesireTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *DesireTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesireTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesireTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesireTaskRequest.Merge(m, src) +} +func (m *DesireTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *DesireTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesireTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesireTaskRequest proto.InternalMessageInfo + +func (m *DesireTaskRequest) GetTaskDefinition() *TaskDefinition { + if m != nil { + return m.TaskDefinition + } + return nil +} + +func (m *DesireTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *DesireTaskRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +type StartTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} } +func (*StartTaskRequest) ProtoMessage() {} +func (*StartTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{2} +} +func (m *StartTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskRequest.Merge(m, src) +} +func (m *StartTaskRequest) 
XXX_Size() int { + return m.Size() +} +func (m *StartTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskRequest proto.InternalMessageInfo + +func (m *StartTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *StartTaskRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type StartTaskResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ShouldStart bool `protobuf:"varint,2,opt,name=should_start,json=shouldStart,proto3" json:"should_start"` +} + +func (m *StartTaskResponse) Reset() { *m = StartTaskResponse{} } +func (*StartTaskResponse) ProtoMessage() {} +func (*StartTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{3} +} +func (m *StartTaskResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartTaskResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskResponse.Merge(m, src) +} +func (m *StartTaskResponse) XXX_Size() int { + return m.Size() +} +func (m *StartTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskResponse proto.InternalMessageInfo + +func (m *StartTaskResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *StartTaskResponse) GetShouldStart() bool { + if m != nil { + return m.ShouldStart + } + return false +} + +// Deprecated: Do not use. 
+type FailTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + FailureReason string `protobuf:"bytes,2,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` +} + +func (m *FailTaskRequest) Reset() { *m = FailTaskRequest{} } +func (*FailTaskRequest) ProtoMessage() {} +func (*FailTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{4} +} +func (m *FailTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FailTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FailTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FailTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FailTaskRequest.Merge(m, src) +} +func (m *FailTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *FailTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FailTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FailTaskRequest proto.InternalMessageInfo + +func (m *FailTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *FailTaskRequest) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +type RejectTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + RejectionReason string `protobuf:"bytes,2,opt,name=rejection_reason,json=rejectionReason,proto3" json:"rejection_reason"` +} + +func (m *RejectTaskRequest) Reset() { *m = RejectTaskRequest{} } +func (*RejectTaskRequest) ProtoMessage() {} +func (*RejectTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{5} +} +func (m *RejectTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *RejectTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RejectTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RejectTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RejectTaskRequest.Merge(m, src) +} +func (m *RejectTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *RejectTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RejectTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RejectTaskRequest proto.InternalMessageInfo + +func (m *RejectTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *RejectTaskRequest) GetRejectionReason() string { + if m != nil { + return m.RejectionReason + } + return "" +} + +type TaskGuidRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` +} + +func (m *TaskGuidRequest) Reset() { *m = TaskGuidRequest{} } +func (*TaskGuidRequest) ProtoMessage() {} +func (*TaskGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{6} +} +func (m *TaskGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskGuidRequest.Merge(m, src) +} +func (m *TaskGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *TaskGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskGuidRequest proto.InternalMessageInfo + 
+func (m *TaskGuidRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +type CompleteTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + Failed bool `protobuf:"varint,3,opt,name=failed,proto3" json:"failed"` + FailureReason string `protobuf:"bytes,4,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` + Result string `protobuf:"bytes,5,opt,name=result,proto3" json:"result"` +} + +func (m *CompleteTaskRequest) Reset() { *m = CompleteTaskRequest{} } +func (*CompleteTaskRequest) ProtoMessage() {} +func (*CompleteTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{7} +} +func (m *CompleteTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompleteTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompleteTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteTaskRequest.Merge(m, src) +} +func (m *CompleteTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *CompleteTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteTaskRequest proto.InternalMessageInfo + +func (m *CompleteTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *CompleteTaskRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *CompleteTaskRequest) GetFailed() bool { + if m != nil { + return m.Failed + } + return false +} + +func (m *CompleteTaskRequest) GetFailureReason() string { + if m != 
nil { + return m.FailureReason + } + return "" +} + +func (m *CompleteTaskRequest) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +type TaskCallbackResponse struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + Failed bool `protobuf:"varint,2,opt,name=failed,proto3" json:"failed"` + FailureReason string `protobuf:"bytes,3,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` + Result string `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` + Annotation string `protobuf:"bytes,5,opt,name=annotation,proto3" json:"annotation,omitempty"` + CreatedAt int64 `protobuf:"varint,6,opt,name=created_at,json=createdAt,proto3" json:"created_at"` +} + +func (m *TaskCallbackResponse) Reset() { *m = TaskCallbackResponse{} } +func (*TaskCallbackResponse) ProtoMessage() {} +func (*TaskCallbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{8} +} +func (m *TaskCallbackResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskCallbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskCallbackResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskCallbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskCallbackResponse.Merge(m, src) +} +func (m *TaskCallbackResponse) XXX_Size() int { + return m.Size() +} +func (m *TaskCallbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskCallbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskCallbackResponse proto.InternalMessageInfo + +func (m *TaskCallbackResponse) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *TaskCallbackResponse) GetFailed() bool { + if m != nil { + return m.Failed + } + return false +} + 
+func (m *TaskCallbackResponse) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +func (m *TaskCallbackResponse) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +func (m *TaskCallbackResponse) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *TaskCallbackResponse) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +type TasksRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *TasksRequest) Reset() { *m = TasksRequest{} } +func (*TasksRequest) ProtoMessage() {} +func (*TasksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{9} +} +func (m *TasksRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TasksRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TasksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TasksRequest.Merge(m, src) +} +func (m *TasksRequest) XXX_Size() int { + return m.Size() +} +func (m *TasksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TasksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TasksRequest proto.InternalMessageInfo + +func (m *TasksRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *TasksRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type TasksResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Tasks []*Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty"` +} + +func (m *TasksResponse) 
Reset() { *m = TasksResponse{} } +func (*TasksResponse) ProtoMessage() {} +func (*TasksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{10} +} +func (m *TasksResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TasksResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TasksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TasksResponse.Merge(m, src) +} +func (m *TasksResponse) XXX_Size() int { + return m.Size() +} +func (m *TasksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TasksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TasksResponse proto.InternalMessageInfo + +func (m *TasksResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *TasksResponse) GetTasks() []*Task { + if m != nil { + return m.Tasks + } + return nil +} + +type TaskByGuidRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` +} + +func (m *TaskByGuidRequest) Reset() { *m = TaskByGuidRequest{} } +func (*TaskByGuidRequest) ProtoMessage() {} +func (*TaskByGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{11} +} +func (m *TaskByGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskByGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskByGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskByGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskByGuidRequest.Merge(m, src) +} +func (m 
*TaskByGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *TaskByGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskByGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskByGuidRequest proto.InternalMessageInfo + +func (m *TaskByGuidRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +type TaskResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Task *Task `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *TaskResponse) Reset() { *m = TaskResponse{} } +func (*TaskResponse) ProtoMessage() {} +func (*TaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{12} +} +func (m *TaskResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskResponse.Merge(m, src) +} +func (m *TaskResponse) XXX_Size() int { + return m.Size() +} +func (m *TaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskResponse proto.InternalMessageInfo + +func (m *TaskResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *TaskResponse) GetTask() *Task { + if m != nil { + return m.Task + } + return nil +} + +func init() { + proto.RegisterType((*TaskLifecycleResponse)(nil), "models.TaskLifecycleResponse") + proto.RegisterType((*DesireTaskRequest)(nil), "models.DesireTaskRequest") + proto.RegisterType((*StartTaskRequest)(nil), "models.StartTaskRequest") + proto.RegisterType((*StartTaskResponse)(nil), "models.StartTaskResponse") + 
proto.RegisterType((*FailTaskRequest)(nil), "models.FailTaskRequest") + proto.RegisterType((*RejectTaskRequest)(nil), "models.RejectTaskRequest") + proto.RegisterType((*TaskGuidRequest)(nil), "models.TaskGuidRequest") + proto.RegisterType((*CompleteTaskRequest)(nil), "models.CompleteTaskRequest") + proto.RegisterType((*TaskCallbackResponse)(nil), "models.TaskCallbackResponse") + proto.RegisterType((*TasksRequest)(nil), "models.TasksRequest") + proto.RegisterType((*TasksResponse)(nil), "models.TasksResponse") + proto.RegisterType((*TaskByGuidRequest)(nil), "models.TaskByGuidRequest") + proto.RegisterType((*TaskResponse)(nil), "models.TaskResponse") +} + +func init() { proto.RegisterFile("task_requests.proto", fileDescriptor_13f778b8a0251259) } + +var fileDescriptor_13f778b8a0251259 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xce, 0x26, 0x6d, 0x68, 0x27, 0x4d, 0xd3, 0xb8, 0x05, 0x59, 0x3d, 0xac, 0x23, 0xc3, 0x21, + 0x42, 0x6a, 0x2a, 0xb5, 0x5c, 0x40, 0xa0, 0x8a, 0xb4, 0x80, 0x90, 0x38, 0x2d, 0x45, 0xea, 0x2d, + 0xda, 0xd8, 0x9b, 0xd4, 0xd4, 0xf1, 0x16, 0xef, 0xfa, 0x50, 0x89, 0x43, 0x1f, 0x81, 0x03, 0x0f, + 0xc1, 0x2b, 0xf0, 0x06, 0x1c, 0x7b, 0xec, 0xc9, 0xa2, 0xee, 0x05, 0xf9, 0xd4, 0x47, 0x40, 0xbb, + 0x76, 0x9b, 0x1f, 0x28, 0x6a, 0x22, 0x71, 0xda, 0x9d, 0x6f, 0xc6, 0xdf, 0xcc, 0x37, 0x3b, 0x99, + 0xc0, 0xaa, 0xa4, 0xe2, 0xa8, 0x13, 0xb2, 0x4f, 0x11, 0x13, 0x52, 0xb4, 0x8e, 0x43, 0x2e, 0xb9, + 0x51, 0x1e, 0x70, 0x97, 0xf9, 0x62, 0x7d, 0xa3, 0xef, 0xc9, 0xc3, 0xa8, 0xdb, 0x72, 0xf8, 0x60, + 0xb3, 0xcf, 0xfb, 0x7c, 0x53, 0xbb, 0xbb, 0x51, 0x4f, 0x5b, 0xda, 0xd0, 0xb7, 0xec, 0xb3, 0x75, + 0x50, 0x5c, 0xf9, 0xbd, 0xc2, 0xc2, 0x90, 0x87, 0x99, 0x61, 0x3f, 0x87, 0xfb, 0xfb, 0x54, 0x1c, + 0xbd, 0xf3, 0x7a, 0xcc, 0x39, 0x71, 0x7c, 0x46, 0x98, 0x38, 0xe6, 0x81, 0x60, 0xc6, 0x43, 0x98, + 0xd7, 0x71, 0x26, 0x6a, 0xa0, 0x66, 0x65, 0xab, 0xda, 0xca, 0x12, 
0xb7, 0x5e, 0x29, 0x90, 0x64, + 0x3e, 0xfb, 0x3b, 0x82, 0xfa, 0x1e, 0x13, 0x5e, 0xc8, 0x14, 0x09, 0xc9, 0x4a, 0x35, 0xf6, 0xa1, + 0xa6, 0x4b, 0x77, 0x59, 0xcf, 0x0b, 0x3c, 0xe9, 0xf1, 0x20, 0x27, 0x79, 0x70, 0x4d, 0xa2, 0xa2, + 0xf7, 0x6e, 0xbc, 0xed, 0xd5, 0x34, 0xb6, 0x26, 0x3f, 0x21, 0xcb, 0x72, 0x2c, 0xc8, 0x78, 0x0c, + 0x8b, 0x3a, 0xa4, 0x1f, 0x79, 0xae, 0x59, 0x6c, 0xa0, 0xe6, 0x62, 0xbb, 0x9a, 0xc6, 0xd6, 0x10, + 0x24, 0x0b, 0xea, 0xfa, 0x26, 0xf2, 0x5c, 0xc3, 0x86, 0xb2, 0xcb, 0x07, 0xd4, 0x0b, 0xcc, 0x92, + 0x0e, 0x84, 0x34, 0xb6, 0x72, 0x84, 0xe4, 0xa7, 0xed, 0xc2, 0xca, 0x7b, 0x49, 0x43, 0x39, 0x5a, + 0xf9, 0x58, 0x0e, 0xf4, 0xef, 0x1c, 0x8f, 0xe0, 0x9e, 0xc3, 0x7c, 0xbf, 0x73, 0x53, 0x4d, 0x25, + 0x8d, 0xad, 0x6b, 0x88, 0x94, 0xd5, 0xe5, 0xad, 0x6b, 0x0f, 0xa0, 0x3e, 0x92, 0x65, 0x8a, 0xde, + 0x1a, 0xdb, 0xb0, 0x24, 0x0e, 0x79, 0xe4, 0xbb, 0x1d, 0xa1, 0x08, 0x74, 0x92, 0x85, 0xf6, 0x4a, + 0x1a, 0x5b, 0x63, 0x38, 0xa9, 0x64, 0x96, 0xce, 0x62, 0x7f, 0x86, 0xda, 0x6b, 0xea, 0xf9, 0xb3, + 0x6a, 0x7a, 0x0a, 0xcb, 0x3d, 0xea, 0xf9, 0x51, 0xc8, 0x3a, 0x21, 0xa3, 0x82, 0x07, 0xb9, 0x34, + 0x23, 0x8d, 0xad, 0x09, 0x0f, 0xa9, 0xe6, 0x36, 0xd1, 0xe6, 0xb3, 0xa2, 0x89, 0xec, 0x53, 0x04, + 0x75, 0xc2, 0x3e, 0x32, 0x67, 0xe6, 0xa6, 0xee, 0xc0, 0x4a, 0xa8, 0x09, 0x3c, 0x1e, 0x8c, 0x97, + 0xb0, 0x96, 0xc6, 0xd6, 0x1f, 0x3e, 0x52, 0xbb, 0x41, 0xb2, 0x32, 0xec, 0x17, 0x50, 0xdb, 0xcf, + 0xc9, 0x66, 0xc8, 0x6f, 0xa7, 0x08, 0x56, 0x77, 0xf9, 0xe0, 0xd8, 0x67, 0x92, 0xfd, 0xd7, 0xc1, + 0x50, 0x23, 0xaa, 0x1a, 0xc8, 0x5c, 0x3d, 0xa2, 0x0b, 0xd9, 0x88, 0x66, 0x08, 0xc9, 0xcf, 0xbf, + 0x3c, 0xc7, 0xdc, 0x1d, 0x9f, 0x43, 0xd1, 0x87, 0x4c, 0x44, 0xbe, 0x34, 0xe7, 0x87, 0xbf, 0x80, + 0x0c, 0x21, 0xf9, 0x69, 0x7f, 0x2d, 0xc2, 0x9a, 0x12, 0xb9, 0x4b, 0x7d, 0xbf, 0x4b, 0x9d, 0xe1, + 0x7c, 0x4e, 0xa3, 0x76, 0xa8, 0xa3, 0x38, 0x85, 0x8e, 0xd2, 0xf4, 0x3a, 0xe6, 0x6e, 0xd3, 0x61, + 0x60, 0x00, 0x1a, 0x04, 0x5c, 0x52, 0xbd, 0x6a, 0xb4, 0x5e, 0x32, 0x82, 0x18, 0x1b, 0x00, 0x4e, + 0xc8, 
0xa8, 0x64, 0x6e, 0x87, 0x4a, 0xb3, 0xdc, 0x40, 0xcd, 0x52, 0x7b, 0x39, 0x8d, 0xad, 0x11, + 0x94, 0x2c, 0xe6, 0xf7, 0x97, 0xd2, 0x3e, 0x80, 0x25, 0xd5, 0x15, 0x71, 0xfd, 0xf6, 0xc3, 0x65, + 0x82, 0x6e, 0x5b, 0x26, 0x77, 0x5c, 0x06, 0x07, 0x50, 0xcd, 0x99, 0xa7, 0x59, 0x04, 0x36, 0xcc, + 0xab, 0x6e, 0x0b, 0xb3, 0xd8, 0x28, 0x35, 0x2b, 0x5b, 0x4b, 0xa3, 0x4b, 0x94, 0x64, 0x2e, 0x7b, + 0x07, 0xea, 0xca, 0x6c, 0x9f, 0xcc, 0x3a, 0xf8, 0x1f, 0x32, 0xd1, 0xd3, 0x55, 0xd6, 0x80, 0x39, + 0x45, 0xa0, 0x25, 0x4f, 0x16, 0xa6, 0x3d, 0xed, 0x27, 0x67, 0x17, 0xb8, 0x70, 0x7e, 0x81, 0x0b, + 0x57, 0x17, 0x18, 0x9d, 0x26, 0x18, 0x7d, 0x4b, 0x30, 0xfa, 0x91, 0x60, 0x74, 0x96, 0x60, 0xf4, + 0x33, 0xc1, 0xe8, 0x57, 0x82, 0x0b, 0x57, 0x09, 0x46, 0x5f, 0x2e, 0x71, 0xe1, 0xec, 0x12, 0x17, + 0xce, 0x2f, 0x71, 0xa1, 0x5b, 0xd6, 0xff, 0x4d, 0xdb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x42, + 0xbf, 0x2a, 0x4c, 0x02, 0x07, 0x00, 0x00, +} + +func (this *TaskLifecycleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskLifecycleResponse) + if !ok { + that2, ok := that.(TaskLifecycleResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *DesireTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesireTaskRequest) + if !ok { + that2, ok := that.(DesireTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskDefinition.Equal(that1.TaskDefinition) { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + return true +} +func (this *StartTaskRequest) Equal(that interface{}) bool { + if that == nil { + return 
this == nil + } + + that1, ok := that.(*StartTaskRequest) + if !ok { + that2, ok := that.(StartTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *StartTaskResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartTaskResponse) + if !ok { + that2, ok := that.(StartTaskResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if this.ShouldStart != that1.ShouldStart { + return false + } + return true +} +func (this *FailTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FailTaskRequest) + if !ok { + that2, ok := that.(FailTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + return true +} +func (this *RejectTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RejectTaskRequest) + if !ok { + that2, ok := that.(RejectTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.RejectionReason != that1.RejectionReason { + return false + } + return true +} +func (this *TaskGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskGuidRequest) + 
if !ok { + that2, ok := that.(TaskGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + return true +} +func (this *CompleteTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CompleteTaskRequest) + if !ok { + that2, ok := that.(CompleteTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.Failed != that1.Failed { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + if this.Result != that1.Result { + return false + } + return true +} +func (this *TaskCallbackResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskCallbackResponse) + if !ok { + that2, ok := that.(TaskCallbackResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.Failed != that1.Failed { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + if this.Result != that1.Result { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if this.CreatedAt != that1.CreatedAt { + return false + } + return true +} +func (this *TasksRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TasksRequest) + if !ok { + that2, ok := that.(TasksRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
this.Domain != that1.Domain { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *TasksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TasksResponse) + if !ok { + that2, ok := that.(TasksResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.Tasks) != len(that1.Tasks) { + return false + } + for i := range this.Tasks { + if !this.Tasks[i].Equal(that1.Tasks[i]) { + return false + } + } + return true +} +func (this *TaskByGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskByGuidRequest) + if !ok { + that2, ok := that.(TaskByGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + return true +} +func (this *TaskResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskResponse) + if !ok { + that2, ok := that.(TaskResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *TaskLifecycleResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskLifecycleResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesireTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := 
make([]string, 0, 7) + s = append(s, "&models.DesireTaskRequest{") + if this.TaskDefinition != nil { + s = append(s, "TaskDefinition: "+fmt.Sprintf("%#v", this.TaskDefinition)+",\n") + } + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.StartTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartTaskResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.StartTaskResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "ShouldStart: "+fmt.Sprintf("%#v", this.ShouldStart)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FailTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.FailTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RejectTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.RejectTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "RejectionReason: "+fmt.Sprintf("%#v", this.RejectionReason)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, 
"&models.TaskGuidRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CompleteTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.CompleteTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "Failed: "+fmt.Sprintf("%#v", this.Failed)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskCallbackResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&models.TaskCallbackResponse{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "Failed: "+fmt.Sprintf("%#v", this.Failed)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TasksRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TasksRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TasksResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TasksResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.Tasks != nil { + s = 
append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskByGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskByGuidRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TaskResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTaskRequests(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TaskLifecycleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskLifecycleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskLifecycleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesireTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *DesireTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesireTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x1a + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0x12 + } + if m.TaskDefinition != nil { + { + size, err := m.TaskDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartTaskResponse) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ShouldStart { + i-- + if m.ShouldStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FailTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FailTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FailTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RejectTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RejectTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RejectTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RejectionReason) > 0 { + i -= 
len(m.RejectionReason) + copy(dAtA[i:], m.RejectionReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.RejectionReason))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompleteTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompleteTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompleteTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Result) > 0 { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x2a + } + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x22 + } + if m.Failed { + i-- + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + 
} + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskCallbackResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCallbackResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskCallbackResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintTaskRequests(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x30 + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x2a + } + if len(m.Result) > 0 { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x22 + } + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x1a + } + if m.Failed { + i-- + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TasksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TasksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskByGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskByGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskByGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTaskRequests(dAtA []byte, offset int, v uint64) int { + offset -= sovTaskRequests(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TaskLifecycleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *DesireTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskDefinition != nil { + l = m.TaskDefinition.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.TaskGuid) + if l > 0 { + n += 1 + 
l + sovTaskRequests(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *StartTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *StartTaskResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.ShouldStart { + n += 2 + } + return n +} + +func (m *FailTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *RejectTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.RejectionReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TaskGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *CompleteTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.Failed { + n += 2 + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Result) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TaskCallbackResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n 
+= 1 + l + sovTaskRequests(uint64(l)) + } + if m.Failed { + n += 2 + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Result) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Annotation) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovTaskRequests(uint64(m.CreatedAt)) + } + return n +} + +func (m *TasksRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TasksResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + } + return n +} + +func (m *TaskByGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TaskResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func sovTaskRequests(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTaskRequests(x uint64) (n int) { + return sovTaskRequests(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TaskLifecycleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskLifecycleResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesireTaskRequest) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&DesireTaskRequest{`, + `TaskDefinition:` + strings.Replace(fmt.Sprintf("%v", this.TaskDefinition), "TaskDefinition", "TaskDefinition", 1) + `,`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `}`, + }, "") + return s +} +func (this *StartTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *StartTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartTaskResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ShouldStart:` + fmt.Sprintf("%v", this.ShouldStart) + `,`, + `}`, + }, "") + return s +} +func (this *FailTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FailTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `}`, + }, "") + return s +} +func (this *RejectTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RejectTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `RejectionReason:` + fmt.Sprintf("%v", this.RejectionReason) + `,`, + `}`, + }, "") + return s +} +func (this *TaskGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskGuidRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `}`, + }, "") + return s +} +func (this *CompleteTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CompleteTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + 
`Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *TaskCallbackResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCallbackResponse{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `}`, + }, "") + return s +} +func (this *TasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *TasksResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTasks := "[]*Task{" + for _, f := range this.Tasks { + repeatedStringForTasks += strings.Replace(fmt.Sprintf("%v", f), "Task", "Task", 1) + "," + } + repeatedStringForTasks += "}" + s := strings.Join([]string{`&TasksResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Tasks:` + repeatedStringForTasks + `,`, + `}`, + }, "") + return s +} +func (this *TaskByGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskByGuidRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `}`, + }, "") + return s +} +func (this *TaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + 
`,`, + `}`, + }, "") + return s +} +func valueToStringTaskRequests(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TaskLifecycleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskLifecycleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskLifecycleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DesireTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesireTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesireTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TaskDefinition == nil { + m.TaskDefinition = &TaskDefinition{} + } + if err := m.TaskDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShouldStart", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShouldStart = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FailTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FailTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FailTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RejectTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RejectTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RejectTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectionReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectionReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompleteTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompleteTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompleteTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskCallbackResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCallbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCallbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Result = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksResponse) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, 
&Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskByGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskByGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskByGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTaskRequests(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTaskRequests + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTaskRequests + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + 
return 0, ErrInvalidLengthTaskRequests + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTaskRequests = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTaskRequests = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTaskRequests = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/task_requests.proto b/vendor/code.cloudfoundry.org/bbs/models/task_requests.proto new file mode 100644 index 00000000..bf43ad66 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task_requests.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "task.proto"; +import "error.proto"; + +message TaskLifecycleResponse { + Error error = 1; +} + +message DesireTaskRequest { + TaskDefinition task_definition = 1 [(gogoproto.jsontag) = "task_definition"]; + string task_guid = 2 [(gogoproto.jsontag) = "task_guid"]; + string domain = 3 [(gogoproto.jsontag) = "domain"]; +} + +message StartTaskRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message StartTaskResponse { + Error error = 1; + bool should_start = 2 [(gogoproto.jsontag) = "should_start"]; +} + +message FailTaskRequest { + option deprecated = true; + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string failure_reason = 2 [(gogoproto.jsontag) = "failure_reason"]; +} + +message RejectTaskRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string rejection_reason = 2 [(gogoproto.jsontag) = "rejection_reason"]; +} + +message TaskGuidRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; +} + +message CompleteTaskRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; + bool failed = 3 [(gogoproto.jsontag) = 
"failed"]; + string failure_reason = 4 [(gogoproto.jsontag) = "failure_reason"]; + string result = 5 [(gogoproto.jsontag) = "result"]; +} + +message TaskCallbackResponse { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + bool failed = 2 [(gogoproto.jsontag) = "failed"]; + string failure_reason = 3 [(gogoproto.jsontag) = "failure_reason"]; + string result = 4 [(gogoproto.jsontag) = "result"]; + string annotation = 5; + int64 created_at = 6 [(gogoproto.jsontag) = "created_at"]; +} + +message TasksRequest{ + string domain = 1 [(gogoproto.jsontag) = "domain"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message TasksResponse{ + Error error = 1; + repeated Task tasks = 2; +} + +message TaskByGuidRequest{ + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; +} + +message TaskResponse{ + Error error = 1; + Task task = 2; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/validator.go b/vendor/code.cloudfoundry.org/bbs/models/validator.go new file mode 100644 index 00000000..ff8f1b81 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/validator.go @@ -0,0 +1,58 @@ +package models + +import ( + "bytes" +) + +type ValidationError []error + +func (ve ValidationError) Append(err error) ValidationError { + switch err := err.(type) { + case ValidationError: + return append(ve, err...) 
+ default: + return append(ve, err) + } +} + +func (ve ValidationError) ToError() error { + if len(ve) == 0 { + return nil + } else { + return ve + } +} + +func (ve ValidationError) Error() string { + var buffer bytes.Buffer + + for i, err := range ve { + if err == nil { + continue + } + if i > 0 { + buffer.WriteString(", ") + } + buffer.WriteString(err.Error()) + } + + return buffer.String() +} + +func (ve ValidationError) Empty() bool { + return len(ve) == 0 +} + +type Validator interface { + Validate() error +} + +func (ve ValidationError) Check(validators ...Validator) ValidationError { + for _, v := range validators { + err := v.Validate() + if err != nil { + ve = ve.Append(err) + } + } + return ve +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/version.go b/vendor/code.cloudfoundry.org/bbs/models/version.go new file mode 100644 index 00000000..54c6ad14 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/version.go @@ -0,0 +1,5 @@ +package models + +type Version struct { + CurrentVersion int64 +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/volume_mount.go b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.go new file mode 100644 index 00000000..fff1b251 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.go @@ -0,0 +1,34 @@ +package models + +import ( + "errors" + + "code.cloudfoundry.org/bbs/format" +) + +func (*VolumePlacement) Version() format.Version { + return format.V1 +} + +func (*VolumePlacement) Validate() error { + return nil +} + +func (v *VolumeMount) Validate() error { + var ve ValidationError + if v.Driver == "" { + ve = ve.Append(errors.New("invalid volume_mount driver")) + } + if !(v.Mode == "r" || v.Mode == "rw") { + ve = ve.Append(errors.New("invalid volume_mount mode")) + } + if v.Shared != nil && v.Shared.VolumeId == "" { + ve = ve.Append(errors.New("invalid volume_mount volume id")) + } + + if !ve.Empty() { + return ve + } + + return nil +} diff --git 
a/vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go new file mode 100644 index 00000000..2193674d --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go @@ -0,0 +1,1061 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: volume_mount.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SharedDevice struct { + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id"` + MountConfig string `protobuf:"bytes,2,opt,name=mount_config,json=mountConfig,proto3" json:"mount_config"` +} + +func (m *SharedDevice) Reset() { *m = SharedDevice{} } +func (*SharedDevice) ProtoMessage() {} +func (*SharedDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_bbde336a4634d84f, []int{0} +} +func (m *SharedDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SharedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SharedDevice.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SharedDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_SharedDevice.Merge(m, src) +} +func (m *SharedDevice) XXX_Size() 
int { + return m.Size() +} +func (m *SharedDevice) XXX_DiscardUnknown() { + xxx_messageInfo_SharedDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_SharedDevice proto.InternalMessageInfo + +func (m *SharedDevice) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *SharedDevice) GetMountConfig() string { + if m != nil { + return m.MountConfig + } + return "" +} + +type VolumeMount struct { + Driver string `protobuf:"bytes,1,opt,name=driver,proto3" json:"driver"` + ContainerDir string `protobuf:"bytes,3,opt,name=container_dir,json=containerDir,proto3" json:"container_dir"` + Mode string `protobuf:"bytes,6,opt,name=mode,proto3" json:"mode"` + // oneof device { + Shared *SharedDevice `protobuf:"bytes,7,opt,name=shared,proto3" json:"shared"` +} + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_bbde336a4634d84f, []int{1} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VolumeMount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeMount) GetDriver() string { + if m != nil { + return m.Driver + } + return "" +} + +func (m *VolumeMount) GetContainerDir() string { + if m != nil { + return m.ContainerDir + } + return "" +} + +func (m *VolumeMount) GetMode() string { + if m != nil { + return m.Mode + } + return "" +} 
+ +func (m *VolumeMount) GetShared() *SharedDevice { + if m != nil { + return m.Shared + } + return nil +} + +type VolumePlacement struct { + DriverNames []string `protobuf:"bytes,1,rep,name=driver_names,json=driverNames,proto3" json:"driver_names"` +} + +func (m *VolumePlacement) Reset() { *m = VolumePlacement{} } +func (*VolumePlacement) ProtoMessage() {} +func (*VolumePlacement) Descriptor() ([]byte, []int) { + return fileDescriptor_bbde336a4634d84f, []int{2} +} +func (m *VolumePlacement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumePlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VolumePlacement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VolumePlacement) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumePlacement.Merge(m, src) +} +func (m *VolumePlacement) XXX_Size() int { + return m.Size() +} +func (m *VolumePlacement) XXX_DiscardUnknown() { + xxx_messageInfo_VolumePlacement.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumePlacement proto.InternalMessageInfo + +func (m *VolumePlacement) GetDriverNames() []string { + if m != nil { + return m.DriverNames + } + return nil +} + +func init() { + proto.RegisterType((*SharedDevice)(nil), "models.SharedDevice") + proto.RegisterType((*VolumeMount)(nil), "models.VolumeMount") + proto.RegisterType((*VolumePlacement)(nil), "models.VolumePlacement") +} + +func init() { proto.RegisterFile("volume_mount.proto", fileDescriptor_bbde336a4634d84f) } + +var fileDescriptor_bbde336a4634d84f = []byte{ + // 381 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x6a, 0xa3, 0x40, + 0x18, 0xc7, 0x9d, 0xc4, 0xb8, 0x66, 0x4c, 0x58, 0x77, 0xd8, 0x83, 0x2c, 0xcb, 0x18, 0x3c, 0x85, + 0x85, 0x35, 0xd0, 0x94, 0xd2, 0x73, 0x1a, 0x0a, 0x0d, 0xb4, 
0x14, 0x0b, 0xbd, 0x8a, 0xd1, 0x89, + 0x19, 0x88, 0x4e, 0x31, 0x9a, 0x73, 0x1f, 0xa1, 0x8f, 0xd1, 0x47, 0xe9, 0x31, 0xd0, 0x4b, 0x4e, + 0xd2, 0x98, 0x4b, 0xf1, 0x94, 0x47, 0x28, 0xce, 0xd8, 0x36, 0xb9, 0x38, 0xf3, 0xfb, 0x7f, 0x7f, + 0x3f, 0xbf, 0xef, 0x2f, 0x44, 0x2b, 0xb6, 0xc8, 0x22, 0xe2, 0x46, 0x2c, 0x8b, 0x53, 0xfb, 0x21, + 0x61, 0x29, 0x43, 0x4a, 0xc4, 0x02, 0xb2, 0x58, 0xfe, 0xf9, 0x1f, 0xd2, 0x74, 0x9e, 0x4d, 0x6d, + 0x9f, 0x45, 0x83, 0x90, 0x85, 0x6c, 0xc0, 0xcb, 0xd3, 0x6c, 0xc6, 0x89, 0x03, 0xbf, 0x89, 0xd7, + 0x2c, 0x06, 0x3b, 0x77, 0x73, 0x2f, 0x21, 0xc1, 0x98, 0xac, 0xa8, 0x4f, 0xd0, 0x3f, 0xd8, 0xae, + 0x9b, 0xd3, 0xc0, 0x00, 0x3d, 0xd0, 0x6f, 0x8f, 0xba, 0x65, 0x6e, 0x7e, 0x8b, 0x8e, 0x2a, 0xae, + 0x57, 0x01, 0x1a, 0xc2, 0x0e, 0x9f, 0xc0, 0xf5, 0x59, 0x3c, 0xa3, 0xa1, 0xd1, 0xe0, 0x76, 0xbd, + 0xcc, 0xcd, 0x23, 0xdd, 0xd1, 0x38, 0x5d, 0x70, 0xb0, 0x5e, 0x01, 0xd4, 0xee, 0x79, 0x87, 0xeb, + 0x4a, 0x45, 0x16, 0x54, 0x82, 0x84, 0xae, 0x48, 0x52, 0x7f, 0x0d, 0x96, 0xb9, 0x59, 0x2b, 0x4e, + 0x7d, 0xa2, 0x33, 0xd8, 0xf5, 0x59, 0x9c, 0x7a, 0x34, 0x26, 0x89, 0x1b, 0xd0, 0xc4, 0x68, 0x72, + 0xeb, 0xaf, 0x32, 0x37, 0x8f, 0x0b, 0x4e, 0xe7, 0x0b, 0xc7, 0x34, 0x41, 0x7f, 0xa1, 0x5c, 0xa5, + 0x62, 0x28, 0xdc, 0xae, 0x96, 0xb9, 0xc9, 0xd9, 0xe1, 0x4f, 0x74, 0x0e, 0x95, 0x25, 0x5f, 0xdd, + 0xf8, 0xd1, 0x03, 0x7d, 0xed, 0xe4, 0xb7, 0x2d, 0x22, 0xb4, 0x0f, 0x03, 0x11, 0xf3, 0x08, 0x9f, + 0x53, 0x9f, 0x13, 0x59, 0x6d, 0xe8, 0xcd, 0x89, 0xac, 0xca, 0x7a, 0x6b, 0x22, 0xab, 0x2d, 0x5d, + 0xb1, 0x2e, 0xe1, 0x4f, 0xb1, 0xd4, 0xed, 0xc2, 0xf3, 0x49, 0x44, 0xe2, 0xb4, 0x4a, 0x47, 0x8c, + 0xef, 0xc6, 0x5e, 0x44, 0x96, 0x06, 0xe8, 0x35, 0x3f, 0xd3, 0x39, 0xd4, 0x1d, 0x4d, 0xd0, 0x4d, + 0x05, 0xa3, 0xd3, 0xf5, 0x16, 0x83, 0xcd, 0x16, 0x4b, 0xfb, 0x2d, 0x06, 0x8f, 0x05, 0x06, 0xcf, + 0x05, 0x06, 0x2f, 0x05, 0x06, 0xeb, 0x02, 0x83, 0xb7, 0x02, 0x83, 0xf7, 0x02, 0x4b, 0xfb, 0x02, + 0x83, 0xa7, 0x1d, 0x96, 0xd6, 0x3b, 0x2c, 0x6d, 0x76, 0x58, 0x9a, 0x2a, 0xfc, 0x5f, 0x0e, 0x3f, + 
0x02, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x23, 0x60, 0xde, 0x18, 0x02, 0x00, 0x00, +} + +func (this *SharedDevice) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SharedDevice) + if !ok { + that2, ok := that.(SharedDevice) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.VolumeId != that1.VolumeId { + return false + } + if this.MountConfig != that1.MountConfig { + return false + } + return true +} +func (this *VolumeMount) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VolumeMount) + if !ok { + that2, ok := that.(VolumeMount) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Driver != that1.Driver { + return false + } + if this.ContainerDir != that1.ContainerDir { + return false + } + if this.Mode != that1.Mode { + return false + } + if !this.Shared.Equal(that1.Shared) { + return false + } + return true +} +func (this *VolumePlacement) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VolumePlacement) + if !ok { + that2, ok := that.(VolumePlacement) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.DriverNames) != len(that1.DriverNames) { + return false + } + for i := range this.DriverNames { + if this.DriverNames[i] != that1.DriverNames[i] { + return false + } + } + return true +} +func (this *SharedDevice) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.SharedDevice{") + s = append(s, "VolumeId: "+fmt.Sprintf("%#v", this.VolumeId)+",\n") + s = append(s, "MountConfig: "+fmt.Sprintf("%#v", this.MountConfig)+",\n") + s = append(s, "}") + 
return strings.Join(s, "") +} +func (this *VolumeMount) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.VolumeMount{") + s = append(s, "Driver: "+fmt.Sprintf("%#v", this.Driver)+",\n") + s = append(s, "ContainerDir: "+fmt.Sprintf("%#v", this.ContainerDir)+",\n") + s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n") + if this.Shared != nil { + s = append(s, "Shared: "+fmt.Sprintf("%#v", this.Shared)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VolumePlacement) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.VolumePlacement{") + s = append(s, "DriverNames: "+fmt.Sprintf("%#v", this.DriverNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringVolumeMount(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SharedDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SharedDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SharedDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MountConfig) > 0 { + i -= len(m.MountConfig) + copy(dAtA[i:], m.MountConfig) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.MountConfig))) + i-- + dAtA[i] = 0x12 + } + if len(m.VolumeId) > 0 { + i -= len(m.VolumeId) + copy(dAtA[i:], m.VolumeId) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.VolumeId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Shared != nil { + { + size, err := m.Shared.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVolumeMount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Mode) > 0 { + i -= len(m.Mode) + copy(dAtA[i:], m.Mode) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.Mode))) + i-- + dAtA[i] = 0x32 + } + if len(m.ContainerDir) > 0 { + i -= len(m.ContainerDir) + copy(dAtA[i:], m.ContainerDir) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.ContainerDir))) + i-- + dAtA[i] = 0x1a + } + if len(m.Driver) > 0 { + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumePlacement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumePlacement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumePlacement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DriverNames) > 0 { + for iNdEx := len(m.DriverNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DriverNames[iNdEx]) + copy(dAtA[i:], m.DriverNames[iNdEx]) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.DriverNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintVolumeMount(dAtA []byte, offset int, v 
uint64) int { + offset -= sovVolumeMount(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SharedDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeId) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + l = len(m.MountConfig) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + return n +} + +func (m *VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + l = len(m.ContainerDir) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + l = len(m.Mode) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + if m.Shared != nil { + l = m.Shared.Size() + n += 1 + l + sovVolumeMount(uint64(l)) + } + return n +} + +func (m *VolumePlacement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DriverNames) > 0 { + for _, s := range m.DriverNames { + l = len(s) + n += 1 + l + sovVolumeMount(uint64(l)) + } + } + return n +} + +func sovVolumeMount(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVolumeMount(x uint64) (n int) { + return sovVolumeMount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SharedDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SharedDevice{`, + `VolumeId:` + fmt.Sprintf("%v", this.VolumeId) + `,`, + `MountConfig:` + fmt.Sprintf("%v", this.MountConfig) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ContainerDir:` + fmt.Sprintf("%v", this.ContainerDir) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Shared:` + strings.Replace(this.Shared.String(), "SharedDevice", "SharedDevice", 1) + `,`, + `}`, + 
}, "") + return s +} +func (this *VolumePlacement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumePlacement{`, + `DriverNames:` + fmt.Sprintf("%v", this.DriverNames) + `,`, + `}`, + }, "") + return s +} +func valueToStringVolumeMount(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SharedDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SharedDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SharedDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVolumeMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthVolumeMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shared == nil { + m.Shared = &SharedDevice{} + } + if err := m.Shared.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVolumeMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthVolumeMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumePlacement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumePlacement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumePlacement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverNames = append(m.DriverNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVolumeMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthVolumeMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVolumeMount(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVolumeMount + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVolumeMount + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVolumeMount + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVolumeMount = fmt.Errorf("proto: negative length found 
during unmarshaling") + ErrIntOverflowVolumeMount = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVolumeMount = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto new file mode 100644 index 00000000..3139b87e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message SharedDevice { + string volume_id = 1 [(gogoproto.jsontag) = "volume_id"]; + string mount_config = 2 [(gogoproto.jsontag) = "mount_config"]; +} + +message VolumeMount { + reserved 2, 4, 5; + + string driver = 1 [(gogoproto.jsontag) = "driver"]; + string container_dir = 3 [(gogoproto.jsontag) = "container_dir"]; + string mode = 6 [(gogoproto.jsontag) = "mode"]; + + // oneof device { + SharedDevice shared = 7 [(gogoproto.jsontag) = "shared"]; + // } +} + +message VolumePlacement { + repeated string driver_names = 1 [(gogoproto.jsontag) = "driver_names"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/package.go b/vendor/code.cloudfoundry.org/bbs/package.go new file mode 100644 index 00000000..0d460353 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/package.go @@ -0,0 +1 @@ +package bbs // import "code.cloudfoundry.org/bbs" diff --git a/vendor/code.cloudfoundry.org/bbs/routes.go b/vendor/code.cloudfoundry.org/bbs/routes.go new file mode 100644 index 00000000..9edc913a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/routes.go @@ -0,0 +1,162 @@ +package bbs + +import "github.com/tedsuo/rata" + +const ( + // Ping + PingRoute_r0 = "Ping" + + // Domains + DomainsRoute_r0 = "Domains" + UpsertDomainRoute_r0 = "UpsertDomain" + + // Actual LRPs + ActualLRPsRoute_r0 = "ActualLRPs" + // Deprecated: use the ActualLRPInstances API instead + ActualLRPGroupsRoute_r0 = 
"ActualLRPGroups" + // Deprecated: use the ActualLRPInstances API instead + ActualLRPGroupsByProcessGuidRoute_r0 = "ActualLRPGroupsByProcessGuid" + // Deprecated: use the ActualLRPInstances API instead + ActualLRPGroupByProcessGuidAndIndexRoute_r0 = "ActualLRPGroupsByProcessGuidAndIndex" + + // Actual LRP Lifecycle + ClaimActualLRPRoute_r0 = "ClaimActualLRP" + StartActualLRPRoute_r1 = "StartActualLRP" + // Deprecated: use StartActaulLRPRoute_r1 instead + StartActualLRPRoute_r0 = "StartActualLRP_r0" + CrashActualLRPRoute_r0 = "CrashActualLRP" + FailActualLRPRoute_r0 = "FailActualLRP" + RemoveActualLRPRoute_r0 = "RemoveActualLRP" + RetireActualLRPRoute_r0 = "RetireActualLRP" + + // Evacuation + RemoveEvacuatingActualLRPRoute_r0 = "RemoveEvacuatingActualLRP" + EvacuateClaimedActualLRPRoute_r0 = "EvacuateClaimedActualLRP" + EvacuateCrashedActualLRPRoute_r0 = "EvacuateCrashedActualLRP" + EvacuateStoppedActualLRPRoute_r0 = "EvacuateStoppedActualLRP" + EvacuateRunningActualLRPRoute_r1 = "EvacuateRunningActualLRP" + // Deprecated: use EvacuateRunningActualLRPRoute_r1 instead + EvacuateRunningActualLRPRoute_r0 = "EvacuateRunningActualLRP_r0" + + // Desired LRPs + DesiredLRPsRoute_r3 = "DesiredLRPs" + DesiredLRPSchedulingInfosRoute_r0 = "DesiredLRPSchedulingInfos" + DesiredLRPSchedulingInfoByProcessGuid_r0 = "DesiredLRPSchedulingInfoByProcessGuid" + DesiredLRPRoutingInfosRoute_r0 = "DesiredLRPRoutingInfos" + DesiredLRPByProcessGuidRoute_r3 = "DesiredLRPByProcessGuid" + // Deprecated: use DsiredLRPByProcessGuidRoute_r3 instead + DesiredLRPsRoute_r2 = "DesiredLRPs_r2" + // Deprecated: use DsiredLRPByProcessGuidRoute_r3 instead + DesiredLRPByProcessGuidRoute_r2 = "DesiredLRPByProcessGuid_r2" + + // Desire LRP Lifecycle + DesireDesiredLRPRoute_r2 = "DesireDesiredLRP" + UpdateDesiredLRPRoute_r0 = "UpdateDesireLRP" + RemoveDesiredLRPRoute_r0 = "RemoveDesiredLRP" + + // Tasks + TasksRoute_r3 = "Tasks" + TaskByGuidRoute_r3 = "TaskByGuid" + DesireTaskRoute_r2 = "DesireTask" + 
StartTaskRoute_r0 = "StartTask" + CancelTaskRoute_r0 = "CancelTask" + // Deprecated: use CancelTaskRotue_r0 instead + FailTaskRoute_r0 = "FailTask" + RejectTaskRoute_r0 = "RejectTask" + CompleteTaskRoute_r0 = "CompleteTask" + ResolvingTaskRoute_r0 = "ResolvingTask" + DeleteTaskRoute_r0 = "DeleteTask" + // Deprecated: use TaskRoute_r3 instead + TasksRoute_r2 = "Tasks_r2" + // Deprecated: use TaskByGuid_r3 instead + TaskByGuidRoute_r2 = "TaskByGuid_r2" + + // Event Streaming + //Deprecated: use LRPInstanceEventStreamRoute_1 instead + LRPGroupEventStreamRoute_r1 = "EventStream" + TaskEventStreamRoute_r1 = "TaskEventStream" + LRPInstanceEventStreamRoute_r1 = "LRPInstanceEventStream" + //Deprecated: use LRPInstanceEventStreamRoute_1 instead + EventStreamRoute_r0 = "EventStream_r0" + // Deprecated: use TaskEventStreamRoute_r1 instead + TaskEventStreamRoute_r0 = "TaskEventStream_r0" + //Deprecated: use LrpInstanceEventStreamRoute_r1 instead + LrpInstanceEventStreamRoute_r0 = "LrpInstanceEventStream_r0" + + // Cell Presence + CellsRoute_r0 = "Cells" +) + +var Routes = rata.Routes{ + // Ping + {Path: "/v1/ping", Method: "POST", Name: PingRoute_r0}, + + // Domains + {Path: "/v1/domains/list", Method: "POST", Name: DomainsRoute_r0}, + {Path: "/v1/domains/upsert", Method: "POST", Name: UpsertDomainRoute_r0}, + + // Actual LRPs + {Path: "/v1/actual_lrps/list", Method: "POST", Name: ActualLRPsRoute_r0}, + {Path: "/v1/actual_lrp_groups/list", Method: "POST", Name: ActualLRPGroupsRoute_r0}, // DEPRECATED + {Path: "/v1/actual_lrp_groups/list_by_process_guid", Method: "POST", Name: ActualLRPGroupsByProcessGuidRoute_r0}, // DEPRECATED + {Path: "/v1/actual_lrp_groups/get_by_process_guid_and_index", Method: "POST", Name: ActualLRPGroupByProcessGuidAndIndexRoute_r0}, // DEPRECATED + + // Actual LRP Lifecycle + {Path: "/v1/actual_lrps/claim", Method: "POST", Name: ClaimActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/start.r1", Method: "POST", Name: StartActualLRPRoute_r1}, + {Path: 
"/v1/actual_lrps/start", Method: "POST", Name: StartActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/crash", Method: "POST", Name: CrashActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/fail", Method: "POST", Name: FailActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/remove", Method: "POST", Name: RemoveActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/retire", Method: "POST", Name: RetireActualLRPRoute_r0}, + + // Evacuation + {Path: "/v1/actual_lrps/remove_evacuating", Method: "POST", Name: RemoveEvacuatingActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_claimed", Method: "POST", Name: EvacuateClaimedActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_crashed", Method: "POST", Name: EvacuateCrashedActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_stopped", Method: "POST", Name: EvacuateStoppedActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_running.r1", Method: "POST", Name: EvacuateRunningActualLRPRoute_r1}, + {Path: "/v1/actual_lrps/evacuate_running", Method: "POST", Name: EvacuateRunningActualLRPRoute_r0}, + + // Desired LRPs + {Path: "/v1/desired_lrp_scheduling_infos/list", Method: "POST", Name: DesiredLRPSchedulingInfosRoute_r0}, + {Path: "/v1/desired_lrp_scheduling_infos/get_by_process_guid", Method: "POST", Name: DesiredLRPSchedulingInfoByProcessGuid_r0}, + {Path: "/v1/desired_lrp_routing_infos/list", Method: "POST", Name: DesiredLRPRoutingInfosRoute_r0}, + + {Path: "/v1/desired_lrps/list.r3", Method: "POST", Name: DesiredLRPsRoute_r3}, + {Path: "/v1/desired_lrps/get_by_process_guid.r3", Method: "POST", Name: DesiredLRPByProcessGuidRoute_r3}, + {Path: "/v1/desired_lrps/list.r2", Method: "POST", Name: DesiredLRPsRoute_r2}, // DEPRECATED + {Path: "/v1/desired_lrps/get_by_process_guid.r2", Method: "POST", Name: DesiredLRPByProcessGuidRoute_r2}, // DEPRECATED + + // Desire LPR Lifecycle + {Path: "/v1/desired_lrp/desire.r2", Method: "POST", Name: DesireDesiredLRPRoute_r2}, + {Path: "/v1/desired_lrp/update", Method: "POST", Name: UpdateDesiredLRPRoute_r0}, 
+ {Path: "/v1/desired_lrp/remove", Method: "POST", Name: RemoveDesiredLRPRoute_r0}, + + // Tasks + {Path: "/v1/tasks/list.r3", Method: "POST", Name: TasksRoute_r3}, + {Path: "/v1/tasks/get_by_task_guid.r3", Method: "POST", Name: TaskByGuidRoute_r3}, + {Path: "/v1/tasks/list.r2", Method: "POST", Name: TasksRoute_r2}, // DEPRECATED + {Path: "/v1/tasks/get_by_task_guid.r2", Method: "POST", Name: TaskByGuidRoute_r2}, // DEPRECATED + + // Task Lifecycle + {Path: "/v1/tasks/desire.r2", Method: "POST", Name: DesireTaskRoute_r2}, + {Path: "/v1/tasks/start", Method: "POST", Name: StartTaskRoute_r0}, + {Path: "/v1/tasks/cancel", Method: "POST", Name: CancelTaskRoute_r0}, + {Path: "/v1/tasks/fail", Method: "POST", Name: FailTaskRoute_r0}, // DEPRECATED + {Path: "/v1/tasks/reject", Method: "POST", Name: RejectTaskRoute_r0}, + {Path: "/v1/tasks/complete", Method: "POST", Name: CompleteTaskRoute_r0}, + {Path: "/v1/tasks/resolving", Method: "POST", Name: ResolvingTaskRoute_r0}, + {Path: "/v1/tasks/delete", Method: "POST", Name: DeleteTaskRoute_r0}, + + // Event Streaming + {Path: "/v1/events.r1", Method: "GET", Name: LRPGroupEventStreamRoute_r1}, // DEPRECATED + {Path: "/v1/events/tasks.r1", Method: "POST", Name: TaskEventStreamRoute_r1}, + {Path: "/v1/events/lrp_instances.r1", Method: "POST", Name: LRPInstanceEventStreamRoute_r1}, + {Path: "/v1/events", Method: "GET", Name: EventStreamRoute_r0}, // DEPRECATED + {Path: "/v1/events/tasks", Method: "POST", Name: TaskEventStreamRoute_r0}, // DEPRECATED + {Path: "/v1/events/lrp_instances", Method: "POST", Name: LrpInstanceEventStreamRoute_r0}, // DEPRECATED + + // Cells + {Path: "/v1/cells/list.r1", Method: "POST", Name: CellsRoute_r0}, +} diff --git a/vendor/code.cloudfoundry.org/bbs/trace/request_id.go b/vendor/code.cloudfoundry.org/bbs/trace/request_id.go new file mode 100644 index 00000000..3eff1ee5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/trace/request_id.go @@ -0,0 +1,53 @@ +package trace + +import ( + "context" + 
"net/http" + "strings" + + "code.cloudfoundry.org/lager/v3" + "github.com/openzipkin/zipkin-go/idgenerator" + "github.com/openzipkin/zipkin-go/model" +) + +const ( + RequestIdHeader = "X-Vcap-Request-Id" +) + +type RequestIdHeaderCtxKeyType struct{} + +var RequestIdHeaderCtxKey = RequestIdHeaderCtxKeyType{} + +func ContextWithRequestId(req *http.Request) context.Context { + return context.WithValue(req.Context(), RequestIdHeaderCtxKey, RequestIdFromRequest(req)) +} + +func RequestIdFromContext(ctx context.Context) string { + if val, ok := ctx.Value(RequestIdHeaderCtxKey).(string); ok { + return val + } + + return "" +} + +func RequestIdFromRequest(req *http.Request) string { + return req.Header.Get(RequestIdHeader) +} + +func LoggerWithTraceInfo(logger lager.Logger, traceIDStr string) lager.Logger { + if traceIDStr == "" { + return logger.WithData(nil) + } + traceHex := strings.Replace(traceIDStr, "-", "", -1) + traceID, err := model.TraceIDFromHex(traceHex) + if err != nil { + return logger.WithData(nil) + } + + spanID := idgenerator.NewRandom128().SpanID(model.TraceID{}) + return logger.WithData(lager.Data{"trace-id": traceID.String(), "span-id": spanID.String()}) +} + +func GenerateTraceID() string { + return idgenerator.NewRandom128().TraceID().String() +} diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore b/vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore new file mode 100644 index 00000000..9ed3b07c --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS b/vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS new file mode 100644 index 00000000..6a633c7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS @@ -0,0 +1 @@ +* @cloudfoundry/wg-app-runtime-platform-diego-approvers diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE b/vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE new file mode 100644 index 00000000..f49a4e16 --- 
/dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE b/vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE new file mode 100644 index 00000000..3c8dd5b6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/README.md b/vendor/code.cloudfoundry.org/cfhttp/v2/README.md new file mode 100644 index 00000000..9f820001 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/README.md @@ -0,0 +1,30 @@ +# cfhttp + +[![Go Report +Card](https://goreportcard.com/badge/code.cloudfoundry.org/cfhttp)](https://goreportcard.com/report/code.cloudfoundry.org/cfhttp) +[![Go +Reference](https://pkg.go.dev/badge/code.cloudfoundry.org/cfhttp.svg)](https://pkg.go.dev/code.cloudfoundry.org/cfhttp) + +Wrapper for official go http package + +> \[!NOTE\] +> +> This repository should be imported as +> `code.cloudfoundry.org/cfhttp/v2`. + +# Contributing + +See the [Contributing.md](./.github/CONTRIBUTING.md) for more +information on how to contribute. + +# Working Group Charter + +This repository is maintained by [App Runtime +Platform](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/app-runtime-platform.md) +under `Diego` area. + +> \[!IMPORTANT\] +> +> Content in this file is managed by the [CI task +> `sync-readme`](https://github.com/cloudfoundry/wg-app-platform-runtime-ci/blob/main/shared/tasks/sync-readme/metadata.yml) +> and is generated by CI following a convention. 
diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/client.go b/vendor/code.cloudfoundry.org/cfhttp/v2/client.go new file mode 100644 index 00000000..22e6d2c5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/client.go @@ -0,0 +1,129 @@ +// Package cfhttp provides defaults and helpers for building http clients. +// It serves to help maintain the same HTTP configuration across multiple +// CloudFoundry components. +package cfhttp + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type config struct { + requestTimeout time.Duration + dialTimeout time.Duration + tcpKeepAliveTimeout time.Duration + idleConnTimeout time.Duration + disableKeepAlives bool + maxIdleConnsPerHost int + tlsConfig *tls.Config +} + +// Option can be used to configure different parts of the HTTP client, including +// its internal transport or the connection dialer. +type Option func(*config) + +// WithStreamingDefaults modifies the HTTP client with defaults that are more +// suitable for consuming server-sent events on persistent connections. +func WithStreamingDefaults() Option { + return func(c *config) { + c.tcpKeepAliveTimeout = 30 * time.Second + c.disableKeepAlives = false + c.requestTimeout = 0 + } +} + +// WithRequestTimeout sets the total time limit for requests made by this Client. +// +// A setting of 0 means no timeout. +func WithRequestTimeout(t time.Duration) Option { + return func(c *config) { + c.requestTimeout = t + } +} + +// WithDialTimeout sets the time limit for connecting to the remote address. This +// includes DNS resolution and retries on multiple IP addresses. +// +// A setting of 0 means no timeout. +func WithDialTimeout(t time.Duration) Option { + return func(c *config) { + c.dialTimeout = t + } +} + +// WithTCPKeepAliveTimeout sets the keep-alive period for an active TCP +// connection. +// +// A setting of 0 disables TCP keep-alives. 
+func WithTCPKeepAliveTimeout(t time.Duration) Option { + return func(c *config) { + c.tcpKeepAliveTimeout = t + } +} + +// WithIdleConnTimeout sets the maximum amount of time a keep-alive +// connection can be idle before it closes itself. +// +// A setting of 0 means no timeout. +func WithIdleConnTimeout(t time.Duration) Option { + return func(c *config) { + c.idleConnTimeout = t + } +} + +// WithDisableKeepAlives disables keep-alive on every HTTP connection so that +// every connection is closed as soon as its request is done. +func WithDisableKeepAlives() Option { + return func(c *config) { + c.disableKeepAlives = true + } +} + +// WithMaxIdleConnsPerHost sets the maximum number of keep-alive connections that +// can be active at a time per remote host. +// +// A setting of 0 sets means the MaxIdleConnsPerHost is +// http.DefaultMaxIdleConnsPerHost (2 at the time of writing). +func WithMaxIdleConnsPerHost(max int) Option { + return func(c *config) { + c.maxIdleConnsPerHost = max + } +} + +// WithTLSConfig sets the TLS configuration on the HTTP client. +func WithTLSConfig(t *tls.Config) Option { + return func(c *config) { + c.tlsConfig = t + } +} + +// NewClient builds a HTTP client with suitable defaults. +// The Options can optionally set configuration options on the +// HTTP client, transport, or net dialer. Options are applied +// in the order that they are passed in, so it is possible for +// later Options previous ones. 
+func NewClient(options ...Option) *http.Client { + cfg := config{ + dialTimeout: 5 * time.Second, + tcpKeepAliveTimeout: 0, + idleConnTimeout: 90 * time.Second, + } + for _, v := range options { + v(&cfg) + } + return &http.Client{ + Transport: &http.Transport{ + DialContext: (&net.Dialer{ + Timeout: cfg.dialTimeout, + KeepAlive: cfg.tcpKeepAliveTimeout, + }).DialContext, + IdleConnTimeout: cfg.idleConnTimeout, + DisableKeepAlives: cfg.disableKeepAlives, + MaxIdleConnsPerHost: cfg.maxIdleConnsPerHost, + TLSClientConfig: cfg.tlsConfig, + }, + Timeout: cfg.requestTimeout, + } +} diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/package.go b/vendor/code.cloudfoundry.org/cfhttp/v2/package.go new file mode 100644 index 00000000..13638153 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/package.go @@ -0,0 +1 @@ +package cfhttp // import "code.cloudfoundry.org/cfhttp/v2" diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf b/vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf new file mode 100644 index 00000000..eba7af74 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1008","-ST1005","-ST1001","-ST1012","-ST1000","-ST1003","-ST1016","-ST1020","-ST1021","-ST1022"] diff --git a/vendor/code.cloudfoundry.org/lager/v3/.gitignore b/vendor/code.cloudfoundry.org/lager/v3/.gitignore new file mode 100644 index 00000000..bc1e5082 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/.gitignore @@ -0,0 +1,38 @@ +# Builds +bin + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# IntelliJ +.idea + +# Dependencies +vendor + +# macOS +.DS_Store + +# Vim files +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] +Session.vim +Sessionx.vim +.netrwhist +*~ +tags +[._]*.un~ + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used 
with LiteIDE +*.out diff --git a/vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS b/vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS new file mode 100644 index 00000000..6a633c7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS @@ -0,0 +1 @@ +* @cloudfoundry/wg-app-runtime-platform-diego-approvers diff --git a/vendor/code.cloudfoundry.org/lager/v3/LICENSE b/vendor/code.cloudfoundry.org/lager/v3/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/lager/v3/NOTICE b/vendor/code.cloudfoundry.org/lager/v3/NOTICE new file mode 100644 index 00000000..3c8dd5b6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. 
diff --git a/vendor/code.cloudfoundry.org/lager/v3/README.md b/vendor/code.cloudfoundry.org/lager/v3/README.md new file mode 100644 index 00000000..9a4248ad --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/README.md @@ -0,0 +1,102 @@ +lager +===== + +**Note**: This repository should be imported as `code.cloudfoundry.org/lager`. + +Lager is a logging library for go. + +## Usage + +Instantiate a logger with the name of your component. + +```go +import ( + "code.cloudfoundry.org/lager/v3" +) + +logger := lager.NewLogger("my-app") +``` + +### Lager and [`log/slog`](https://pkg.go.dev/log/slog) +Lager was written long before Go 1.21 introduced structured logging in the standard library. +There are some wrapper functions for interoperability between Lager and `slog`, +which are only available when using Go 1.21 and higher. + +Lager can be used as an [`slog.Handler`](https://pkg.go.dev/log/slog#Handler) using the `NewHandler()` function: + +```go +func codeThatAcceptsSlog(l *slog.Logger) { ... } + +lagerLogger := lager.NewLogger("my-lager-logger") + +codeThatAcceptsSlog(slog.New(lager.NewHandler(lagerLogger))) +``` + +An `slog.Logger` can be used as a Lager `Sink` using the `NewSlogSink()` function: +```go +var *slog.Logger l = codeThatReturnsSlog() + +lagerLogger := lager.NewLogger("my-lager-logger") + +lagerLogger.RegisterSink(lager.NewSlogSink(l)) +``` + +### Sinks + +Lager can write logs to a variety of destinations. You can specify the destinations +using Lager sinks: + +To write to an arbitrary `Writer` object: + +```go +logger.RegisterSink(lager.NewWriterSink(myWriter, lager.INFO)) +``` + +### Emitting logs + +Lager supports the usual level-based logging, with an optional argument for arbitrary key-value data. 
+ +```go +logger.Info("doing-stuff", lager.Data{ + "informative": true, +}) +``` + +output: +```json +{ "source": "my-app", "message": "doing-stuff", "data": { "informative": true }, "timestamp": 1232345, "log_level": 1 } +``` + +Error messages also take an `Error` object: + +```go +logger.Error("failed-to-do-stuff", errors.New("Something went wrong")) +``` + +output: +```json +{ "source": "my-app", "message": "failed-to-do-stuff", "data": { "error": "Something went wrong" }, "timestamp": 1232345, "log_level": 1 } +``` + +### Sessions + +You can avoid repetition of contextual data using 'Sessions': + +```go + +contextualLogger := logger.Session("my-task", lager.Data{ + "request-id": 5, +}) + +contextualLogger.Info("my-action") +``` + +output: + +```json +{ "source": "my-app", "message": "my-task.my-action", "data": { "request-id": 5 }, "timestamp": 1232345, "log_level": 1 } +``` + +## License + +Lager is [Apache 2.0](https://github.com/cloudfoundry/lager/blob/master/LICENSE) licensed. diff --git a/vendor/code.cloudfoundry.org/lager/v3/handler.go b/vendor/code.cloudfoundry.org/lager/v3/handler.go new file mode 100644 index 00000000..2cdaf7c3 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/handler.go @@ -0,0 +1,162 @@ +//go:build go1.21 + +package lager + +import ( + "context" + "fmt" + "log/slog" +) + +// NewHandler wraps the logger as a slog.Handler +// The supplied Logger must be a lager.logger +// type created by lager.NewLogger(), otherwise +// it panics. 
+// +// Note the following log level conversions: +// +// slog.LevelDebug -> lager.DEBUG +// slog.LevelError -> lager.ERROR +// slog.LevelError -> lager.FATAL +// default -> lager.INFO +func NewHandler(l Logger) slog.Handler { + switch ll := l.(type) { + case *logger: + return &handler{logger: ll} + default: + panic("lager.Logger must be an instance of lager.logger") + } +} + +// Type decorator is used to decorate the attributes with groups and more attributes +type decorator func(map[string]any) map[string]any + +// Type handler is a slog.Handler that wraps a lager logger. +// It uses the logger concrete type rather than the Logger interface +// because it uses methods not available on the interface. +type handler struct { + logger *logger + decorators []decorator +} + +// Enabled always returns true +func (h *handler) Enabled(_ context.Context, _ slog.Level) bool { + return true +} + +// Handle converts a slog.Record into a lager.LogFormat and passes it to every Sink +func (h *handler) Handle(_ context.Context, r slog.Record) error { + log := LogFormat{ + time: r.Time, + Timestamp: formatTimestamp(r.Time), + Source: h.logger.component, + Message: fmt.Sprintf("%s.%s", h.logger.task, r.Message), + LogLevel: toLogLevel(r.Level), + Data: h.logger.baseData(h.decorate(attrFromRecord(r))), + } + + for _, sink := range h.logger.sinks { + sink.Log(log) + } + + return nil +} + +// WithAttrs returns a new slog.Handler which always adds the specified attributes +func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &handler{ + logger: h.logger, + decorators: append(h.decorators, attrDecorator(attrs)), + } +} + +// WithGroup returns a new slog.Handler which always logs attributes in the specified group +func (h *handler) WithGroup(name string) slog.Handler { + return &handler{ + logger: h.logger, + decorators: append(h.decorators, groupDecorator(name)), + } +} + +// decorate will decorate a body using the decorators that have been defined +func (h *handler) 
decorate(body map[string]any) map[string]any { + for i := len(h.decorators) - 1; i >= 0; i-- { // reverse iteration + body = h.decorators[i](body) + } + return body +} + +// attrDecorator returns a decorator for the specified attributes +func attrDecorator(attrs []slog.Attr) decorator { + return func(body map[string]any) map[string]any { + if body == nil { + body = make(map[string]any) + } + processAttrs(attrs, body) + return body + } +} + +// groupDecorator returns a decorator for the specified group name +func groupDecorator(group string) decorator { + return func(body map[string]any) map[string]any { + switch len(body) { + case 0: + return nil + default: + return map[string]any{group: body} + } + } +} + +// attrFromRecord extracts and processes the attributes from a record +func attrFromRecord(r slog.Record) map[string]any { + if r.NumAttrs() == 0 { + return nil + } + + body := make(map[string]any, r.NumAttrs()) + r.Attrs(func(attr slog.Attr) bool { + processAttr(attr, body) + return true + }) + + return body +} + +// processAttrs calls processAttr() for each attribute +func processAttrs(attrs []slog.Attr, target map[string]any) { + for _, attr := range attrs { + processAttr(attr, target) + } +} + +// processAttr adds the attribute to the target with appropriate transformations +func processAttr(attr slog.Attr, target map[string]any) { + rv := attr.Value.Resolve() + + switch { + case rv.Kind() == slog.KindGroup && attr.Key != "": + nt := make(map[string]any) + processAttrs(attr.Value.Group(), nt) + target[attr.Key] = nt + case rv.Kind() == slog.KindGroup && attr.Key == "": + processAttrs(attr.Value.Group(), target) + case attr.Key == "": + // skip + default: + target[attr.Key] = rv.Any() + } +} + +// toLogLevel converts from slog levels to lager levels +func toLogLevel(l slog.Level) LogLevel { + switch l { + case slog.LevelDebug: + return DEBUG + case slog.LevelError, slog.LevelWarn: + return ERROR + default: + return INFO + } +} diff --git 
a/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go new file mode 100644 index 00000000..c34b9ade --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go @@ -0,0 +1 @@ +package truncate // import "code.cloudfoundry.org/lager/v3/internal/truncate" diff --git a/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go new file mode 100644 index 00000000..f4fda22d --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go @@ -0,0 +1,174 @@ +package truncate + +import ( + "reflect" +) + +// Value recursively walks through the value provided by `v` and truncates +// any strings longer than `maxLength`. +// Example: +// type foobar struct{A string; B string} +// truncate.Value(foobar{A:"foo",B:"bar"}, 20) == foobar{A:"foo",B:"bar"} +// truncate.Value(foobar{A:strings.Repeat("a", 25),B:"bar"}, 20) == foobar{A:"aaaaaaaa-(truncated)",B:"bar"} +func Value(v interface{}, maxLength int) interface{} { + rv := reflect.ValueOf(v) + tv := truncateValue(rv, maxLength) + if rv != tv { + return tv.Interface() + } + return v +} + +func truncateValue(rv reflect.Value, maxLength int) reflect.Value { + if maxLength <= 0 { + return rv + } + + switch rv.Kind() { + case reflect.Interface: + return truncateInterface(rv, maxLength) + case reflect.Ptr: + return truncatePtr(rv, maxLength) + case reflect.Struct: + return truncateStruct(rv, maxLength) + case reflect.Map: + return truncateMap(rv, maxLength) + case reflect.Array: + return truncateArray(rv, maxLength) + case reflect.Slice: + return truncateSlice(rv, maxLength) + case reflect.String: + return truncateString(rv, maxLength) + } + return rv +} + +func truncateInterface(rv reflect.Value, maxLength int) reflect.Value { + tv := truncateValue(rv.Elem(), maxLength) + if tv != rv.Elem() { + return tv + } + return rv +} + 
+func truncatePtr(rv reflect.Value, maxLength int) reflect.Value { + tv := truncateValue(rv.Elem(), maxLength) + if rv.Elem() != tv { + tvp := reflect.New(rv.Elem().Type()) + tvp.Elem().Set(tv) + return tvp + } + return rv +} + +func truncateStruct(rv reflect.Value, maxLength int) reflect.Value { + numFields := rv.NumField() + fields := make([]reflect.Value, numFields) + changed := false + for i := 0; i < numFields; i++ { + fv := rv.Field(i) + tv := truncateValue(fv, maxLength) + if fv != tv { + changed = true + } + fields[i] = tv + } + if changed { + nv := reflect.New(rv.Type()).Elem() + for i, fv := range fields { + nv.Field(i).Set(fv) + } + return nv + } + return rv +} + +func truncateMap(rv reflect.Value, maxLength int) reflect.Value { + keys := rv.MapKeys() + truncatedMap := make(map[reflect.Value]reflect.Value) + changed := false + for _, key := range keys { + mapV := rv.MapIndex(key) + tv := truncateValue(mapV, maxLength) + if mapV != tv { + changed = true + } + truncatedMap[key] = tv + } + if changed { + nv := reflect.MakeMap(rv.Type()) + for k, v := range truncatedMap { + nv.SetMapIndex(k, v) + } + return nv + } + return rv + +} + +func truncateArray(rv reflect.Value, maxLength int) reflect.Value { + return truncateList(rv, maxLength, func(size int) reflect.Value { + arrayType := reflect.ArrayOf(size, rv.Index(0).Type()) + return reflect.New(arrayType).Elem() + }) +} + +func truncateSlice(rv reflect.Value, maxLength int) reflect.Value { + return truncateList(rv, maxLength, func(size int) reflect.Value { + return reflect.MakeSlice(rv.Type(), size, size) + }) +} + +func truncateList(rv reflect.Value, maxLength int, newList func(size int) reflect.Value) reflect.Value { + size := rv.Len() + truncatedValues := make([]reflect.Value, size) + changed := false + for i := 0; i < size; i++ { + elemV := rv.Index(i) + tv := truncateValue(elemV, maxLength) + if elemV != tv { + changed = true + } + truncatedValues[i] = tv + } + if changed { + nv := newList(size) + for i, 
v := range truncatedValues { + nv.Index(i).Set(v) + } + return nv + } + return rv +} + +func truncateString(rv reflect.Value, maxLength int) reflect.Value { + s := String(rv.String(), maxLength) + if s != rv.String() { + return reflect.ValueOf(s) + } + return rv + +} + +const truncated = "-(truncated)" +const lenTruncated = len(truncated) + +// String truncates long strings from the middle, but leaves strings shorter +// than `maxLength` untouched. +// If the string is shorter than the string "-(truncated)" and the string +// exceeds `maxLength`, the output will not be truncated. +// Example: +// truncate.String(strings.Repeat("a", 25), 20) == "aaaaaaaa-(truncated)" +// truncate.String("foobar", 20) == "foobar" +// truncate.String("foobar", 5) == "foobar" +func String(s string, maxLength int) string { + if maxLength <= 0 || len(s) < lenTruncated || len(s) <= maxLength { + return s + } + + strBytes := []byte(s) + truncatedBytes := []byte(truncated) + prefixLength := maxLength - lenTruncated + prefix := strBytes[0:prefixLength] + return string(append(prefix, truncatedBytes...)) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/json_redacter.go b/vendor/code.cloudfoundry.org/lager/v3/json_redacter.go new file mode 100644 index 00000000..a0901480 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/json_redacter.go @@ -0,0 +1,115 @@ +package lager + +import ( + "encoding/json" + "regexp" +) + +const awsAccessKeyIDPattern = `AKIA[A-Z0-9]{16}` +const awsSecretAccessKeyPattern = `KEY["']?\s*(?::|=>|=)\s*["']?[A-Z0-9/\+=]{40}["']?` +const cryptMD5Pattern = `\$1\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{22}` +const cryptSHA256Pattern = `\$5\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{43}` +const cryptSHA512Pattern = `\$6\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{86}` +const privateKeyHeaderPattern = `-----BEGIN(.*)PRIVATE KEY-----` + +type JSONRedacter struct { + keyMatchers []*regexp.Regexp + valueMatchers []*regexp.Regexp +} + +func NewJSONRedacter(keyPatterns []string, valuePatterns []string) 
(*JSONRedacter, error) { + if keyPatterns == nil { + keyPatterns = []string{"[Pp]wd", "[Pp]ass"} + } + if valuePatterns == nil { + valuePatterns = DefaultValuePatterns() + } + ret := &JSONRedacter{} + for _, v := range keyPatterns { + r, err := regexp.Compile(v) + if err != nil { + return nil, err + } + ret.keyMatchers = append(ret.keyMatchers, r) + } + for _, v := range valuePatterns { + r, err := regexp.Compile(v) + if err != nil { + return nil, err + } + ret.valueMatchers = append(ret.valueMatchers, r) + } + return ret, nil +} + +func (r JSONRedacter) Redact(data []byte) []byte { + var jsonBlob interface{} + err := json.Unmarshal(data, &jsonBlob) + if err != nil { + return handleError(err) + } + r.redactValue(&jsonBlob) + + data, err = json.Marshal(jsonBlob) + if err != nil { + return handleError(err) + } + + return data +} + +func (r JSONRedacter) redactValue(data *interface{}) interface{} { + if data == nil { + return data + } + + if a, ok := (*data).([]interface{}); ok { + r.redactArray(&a) + } else if m, ok := (*data).(map[string]interface{}); ok { + r.redactObject(&m) + } else if s, ok := (*data).(string); ok { + for _, m := range r.valueMatchers { + if m.MatchString(s) { + (*data) = "*REDACTED*" + break + } + } + } + return (*data) +} + +func (r JSONRedacter) redactArray(data *[]interface{}) { + for i := range *data { + r.redactValue(&((*data)[i])) + } +} + +func (r JSONRedacter) redactObject(data *map[string]interface{}) { + for k, v := range *data { + for _, m := range r.keyMatchers { + if m.MatchString(k) { + (*data)[k] = "*REDACTED*" + break + } + } + if (*data)[k] != "*REDACTED*" { + (*data)[k] = r.redactValue(&v) + } + } +} + +func handleError(err error) []byte { + var content []byte + if _, ok := err.(*json.UnsupportedTypeError); ok { + data := map[string]interface{}{"lager serialisation error": err.Error()} + content, err = json.Marshal(data) + } + if err != nil { + panic(err) + } + return content +} + +func DefaultValuePatterns() []string { + 
return []string{awsAccessKeyIDPattern, awsSecretAccessKeyPattern, cryptMD5Pattern, cryptSHA256Pattern, cryptSHA512Pattern, privateKeyHeaderPattern} +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/logger.go b/vendor/code.cloudfoundry.org/lager/v3/logger.go new file mode 100644 index 00000000..64a29d7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/logger.go @@ -0,0 +1,217 @@ +package lager + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "sync/atomic" + "time" + + "github.com/openzipkin/zipkin-go/idgenerator" + "github.com/openzipkin/zipkin-go/model" +) + +const ( + StackTraceBufferSize = 1024 * 100 + RequestIdHeader = "X-Vcap-Request-Id" +) + +type Logger interface { + RegisterSink(Sink) + Session(task string, data ...Data) Logger + SessionName() string + Debug(action string, data ...Data) + Info(action string, data ...Data) + Error(action string, err error, data ...Data) + Fatal(action string, err error, data ...Data) + WithData(Data) Logger + WithTraceInfo(*http.Request) Logger +} + +type logger struct { + component string + task string + sinks []Sink + sessionID string + nextSession uint32 + data Data + idGenerator idgenerator.IDGenerator +} + +func NewLogger(component string) Logger { + return &logger{ + component: component, + task: component, + sinks: []Sink{}, + data: Data{}, + idGenerator: idgenerator.NewRandom128(), + } +} + +func (l *logger) RegisterSink(sink Sink) { + l.sinks = append(l.sinks, sink) +} + +func (l *logger) SessionName() string { + return l.task +} + +func (l *logger) Session(task string, data ...Data) Logger { + sid := atomic.AddUint32(&l.nextSession, 1) + + var sessionIDstr string + + if l.sessionID != "" { + sessionIDstr = fmt.Sprintf("%s.%d", l.sessionID, sid) + } else { + sessionIDstr = fmt.Sprintf("%d", sid) + } + + return &logger{ + component: l.component, + task: fmt.Sprintf("%s.%s", l.task, task), + sinks: l.sinks, + sessionID: sessionIDstr, + data: l.baseData(data...), + idGenerator: l.idGenerator, + } 
+} + +func (l *logger) WithData(data Data) Logger { + return &logger{ + component: l.component, + task: l.task, + sinks: l.sinks, + sessionID: l.sessionID, + data: l.baseData(data), + idGenerator: l.idGenerator, + } +} + +func (l *logger) WithTraceInfo(req *http.Request) Logger { + traceIDHeader := req.Header.Get(RequestIdHeader) + if traceIDHeader == "" { + return l.WithData(nil) + } + traceHex := strings.Replace(traceIDHeader, "-", "", -1) + traceID, err := model.TraceIDFromHex(traceHex) + if err != nil { + return l.WithData(nil) + } + + spanID := l.idGenerator.SpanID(model.TraceID{}) + return l.WithData(Data{"trace-id": traceID.String(), "span-id": spanID.String()}) +} + +func (l *logger) Debug(action string, data ...Data) { + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: DEBUG, + Data: l.baseData(data...), + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Info(action string, data ...Data) { + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: INFO, + Data: l.baseData(data...), + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Error(action string, err error, data ...Data) { + logData := l.baseData(data...) + + if err != nil { + logData["error"] = err.Error() + } + + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: ERROR, + Data: logData, + Error: err, + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Fatal(action string, err error, data ...Data) { + logData := l.baseData(data...) 
+ + stackTrace := make([]byte, StackTraceBufferSize) + stackSize := runtime.Stack(stackTrace, false) + stackTrace = stackTrace[:stackSize] + + if err != nil { + logData["error"] = err.Error() + } + + logData["trace"] = string(stackTrace) + + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: FATAL, + Data: logData, + Error: err, + } + + for _, sink := range l.sinks { + sink.Log(log) + } + + panic(err) +} + +func (l *logger) baseData(givenData ...Data) Data { + data := Data{} + + for k, v := range l.data { + data[k] = v + } + + if len(givenData) > 0 { + for _, dataArg := range givenData { + for key, val := range dataArg { + data[key] = val + } + } + } + + if l.sessionID != "" { + data["session"] = l.sessionID + } + + return data +} + +func formatTimestamp(t time.Time) string { + return fmt.Sprintf("%.9f", float64(t.UnixNano())/1e9) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/models.go b/vendor/code.cloudfoundry.org/lager/v3/models.go new file mode 100644 index 00000000..63077e72 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/models.go @@ -0,0 +1,151 @@ +package lager + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" +) + +type LogLevel int + +const ( + DEBUG LogLevel = iota + INFO + ERROR + FATAL +) + +var logLevelStr = [...]string{ + DEBUG: "debug", + INFO: "info", + ERROR: "error", + FATAL: "fatal", +} + +func (l LogLevel) String() string { + if DEBUG <= l && l <= FATAL { + return logLevelStr[l] + } + return "invalid" +} + +func LogLevelFromString(s string) (LogLevel, error) { + for k, v := range logLevelStr { + if v == s { + return LogLevel(k), nil + } + } + return -1, fmt.Errorf("invalid log level: %s", s) +} + +type Data map[string]interface{} + +type rfc3339Time time.Time + +const rfc3339Nano = "2006-01-02T15:04:05.000000000Z07:00" + +func (t rfc3339Time) MarshalJSON() ([]byte, error) { + // Use 
AppendFormat to avoid slower string operations, instead we only + // operate on a byte slice + // Avoid creating a new copy of t with a cast, instead use type conversion + stamp := append((time.Time)(t).UTC().AppendFormat([]byte{'"'}, rfc3339Nano), '"') + return stamp, nil +} + +func (t *rfc3339Time) UnmarshalJSON(data []byte) error { + return (*time.Time)(t).UnmarshalJSON(data) +} + +type LogFormat struct { + Timestamp string `json:"timestamp"` + Source string `json:"source"` + Message string `json:"message"` + LogLevel LogLevel `json:"log_level"` + Data Data `json:"data"` + Error error `json:"-"` + time time.Time +} + +func (log LogFormat) ToJSON() []byte { + content, err := json.Marshal(log) + if err != nil { + log.Data = dataForJSONMarhallingError(err, log.Data) + content, err = json.Marshal(log) + if err != nil { + panic(err) + } + } + return content +} + +type prettyLogFormat struct { + Timestamp rfc3339Time `json:"timestamp"` + Level string `json:"level"` + Source string `json:"source"` + Message string `json:"message"` + Data Data `json:"data"` + Error error `json:"-"` +} + +func (log LogFormat) toPrettyJSON() []byte { + t := log.time + if t.IsZero() { + t = parseTimestamp(log.Timestamp) + } + + prettyLog := prettyLogFormat{ + Timestamp: rfc3339Time(t), + Level: log.LogLevel.String(), + Source: log.Source, + Message: log.Message, + Data: log.Data, + Error: log.Error, + } + + content, err := json.Marshal(prettyLog) + + if err != nil { + prettyLog.Data = dataForJSONMarhallingError(err, prettyLog.Data) + content, err = json.Marshal(prettyLog) + if err != nil { + panic(err) + } + } + + return content +} + +func dataForJSONMarhallingError(err error, data Data) Data { + _, ok1 := err.(*json.UnsupportedTypeError) + _, ok2 := err.(*json.MarshalerError) + errKey := "unknown_error" + if ok1 || ok2 { + errKey = "lager serialisation error" + } + + return map[string]interface{}{ + errKey: err.Error(), + "data_dump": fmt.Sprintf("%#v", data), + } +} + +func 
parseTimestamp(s string) time.Time { + if s == "" { + return time.Now() + } + n := strings.IndexByte(s, '.') + if n <= 0 || n == len(s)-1 { + return time.Now() + } + sec, err := strconv.ParseInt(s[:n], 10, 64) + if err != nil || sec < 0 { + return time.Now() + } + nsec, err := strconv.ParseInt(s[n+1:], 10, 64) + if err != nil || nsec < 0 { + return time.Now() + } + return time.Unix(sec, nsec) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go b/vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go new file mode 100644 index 00000000..aeb714d9 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go @@ -0,0 +1,37 @@ +package lager + +import ( + "sync/atomic" +) + +type ReconfigurableSink struct { + sink Sink + + minLogLevel int32 +} + +func NewReconfigurableSink(sink Sink, initialMinLogLevel LogLevel) *ReconfigurableSink { + return &ReconfigurableSink{ + sink: sink, + + minLogLevel: int32(initialMinLogLevel), + } +} + +func (sink *ReconfigurableSink) Log(log LogFormat) { + minLogLevel := LogLevel(atomic.LoadInt32(&sink.minLogLevel)) + + if log.LogLevel < minLogLevel { + return + } + + sink.sink.Log(log) +} + +func (sink *ReconfigurableSink) SetMinLevel(level LogLevel) { + atomic.StoreInt32(&sink.minLogLevel, int32(level)) +} + +func (sink *ReconfigurableSink) GetMinLevel() LogLevel { + return LogLevel(atomic.LoadInt32(&sink.minLogLevel)) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go b/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go new file mode 100644 index 00000000..17a30295 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go @@ -0,0 +1,62 @@ +package lager + +import ( + "encoding/json" +) + +type redactingSink struct { + sink Sink + jsonRedacter *JSONRedacter +} + +// NewRedactingSink creates a sink that redacts sensitive information from the +// data field. 
The old behavior of NewRedactingWriterSink (which was removed +// in v2) can be obtained using the following code: +// +// redactingSink, err := NewRedactingSink( +// NewWriterSink(writer, minLogLevel), +// keyPatterns, +// valuePatterns, +// ) +// +// if err != nil { +// return nil, err +// } +// +// return NewReconfigurableSink( +// redactingSink, +// minLogLevel, +// ), nil +// +func NewRedactingSink(sink Sink, keyPatterns []string, valuePatterns []string) (Sink, error) { + jsonRedacter, err := NewJSONRedacter(keyPatterns, valuePatterns) + if err != nil { + return nil, err + } + + return &redactingSink{ + sink: sink, + jsonRedacter: jsonRedacter, + }, nil +} + +func (sink *redactingSink) Log(log LogFormat) { + rawJSON, err := json.Marshal(log.Data) + if err != nil { + log.Data = dataForJSONMarhallingError(err, log.Data) + + rawJSON, err = json.Marshal(log.Data) + if err != nil { + panic(err) + } + } + + redactedJSON := sink.jsonRedacter.Redact(rawJSON) + + err = json.Unmarshal(redactedJSON, &log.Data) + if err != nil { + panic(err) + } + + sink.sink.Log(log) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/slog_sink.go b/vendor/code.cloudfoundry.org/lager/v3/slog_sink.go new file mode 100644 index 00000000..095e16a6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/slog_sink.go @@ -0,0 +1,63 @@ +//go:build go1.21 + +package lager + +import ( + "context" + "log/slog" +) + +// Type slogSink wraps an slog.Logger as a Sink +type slogSink struct { + logger *slog.Logger +} + +// NewSlogSink wraps a slog.Logger as a lager Sink +// This allows code using slog to integrate with code that uses lager +// Note the following log level conversions: +// +// lager.DEBUG -> slog.LevelDebug +// lager.ERROR -> slog.LevelError +// lager.FATAL -> slog.LevelError +// default -> slog.LevelInfo +func NewSlogSink(l *slog.Logger) Sink { + return &slogSink{logger: l} +} + +// Log exists to implement the lager.Sink interface. 
+func (l *slogSink) Log(f LogFormat) { + // For lager.Error() and lager.Fatal() the error (and stacktrace) are already in f.Data + r := slog.NewRecord(f.time, toSlogLevel(f.LogLevel), f.Message, 0) + r.AddAttrs(toAttr(f.Data)...) + + // By calling the handler directly we can pass through the original timestamp, + // whereas calling a method on the logger would generate a new timestamp + l.logger.Handler().Handle(context.Background(), r) +} + +// toAttr converts a lager.Data into []slog.Attr +func toAttr(d Data) []slog.Attr { + l := len(d) + if l == 0 { + return nil + } + + attr := make([]slog.Attr, 0, l) + for k, v := range d { + attr = append(attr, slog.Any(k, v)) + } + + return attr +} + +// toSlogLevel converts lager log levels to slog levels +func toSlogLevel(l LogLevel) slog.Level { + switch l { + case DEBUG: + return slog.LevelDebug + case ERROR, FATAL: + return slog.LevelError + default: + return slog.LevelInfo + } +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/tools.go b/vendor/code.cloudfoundry.org/lager/v3/tools.go new file mode 100644 index 00000000..56304cc4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/tools.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package lager + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" +) diff --git a/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go b/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go new file mode 100644 index 00000000..ba261fe7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go @@ -0,0 +1,32 @@ +package lager + +import "code.cloudfoundry.org/lager/v3/internal/truncate" + +type truncatingSink struct { + sink Sink + maxDataStringLength int +} + +// NewTruncatingSink returns a sink that truncates strings longer than the max +// data string length +// Example: +// writerSink := lager.NewWriterSink(os.Stdout, lager.INFO) +// sink := lager.NewTruncatingSink(testSink, 20) +// logger := lager.NewLogger("test") +// logger.RegisterSink(sink) 
+// logger.Info("message", lager.Data{"A": strings.Repeat("a", 25)}) +func NewTruncatingSink(sink Sink, maxDataStringLength int) Sink { + return &truncatingSink{ + sink: sink, + maxDataStringLength: maxDataStringLength, + } +} + +func (sink *truncatingSink) Log(log LogFormat) { + truncatedData := Data{} + for k, v := range log.Data { + truncatedData[k] = truncate.Value(v, sink.maxDataStringLength) + } + log.Data = truncatedData + sink.sink.Log(log) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/writer_sink.go b/vendor/code.cloudfoundry.org/lager/v3/writer_sink.go new file mode 100644 index 00000000..e78177a5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/writer_sink.go @@ -0,0 +1,66 @@ +package lager + +import ( + "io" + "sync" +) + +// A Sink represents a write destination for a Logger. It provides +// a thread-safe interface for writing logs +type Sink interface { + //Log to the sink. Best effort -- no need to worry about errors. + Log(LogFormat) +} + +type writerSink struct { + writer io.Writer + minLogLevel LogLevel + writeL *sync.Mutex +} + +func NewWriterSink(writer io.Writer, minLogLevel LogLevel) Sink { + return &writerSink{ + writer: writer, + minLogLevel: minLogLevel, + writeL: new(sync.Mutex), + } +} + +func (sink *writerSink) Log(log LogFormat) { + if log.LogLevel < sink.minLogLevel { + return + } + + // Convert to json outside of critical section to minimize time spent holding lock + message := append(log.ToJSON(), '\n') + + sink.writeL.Lock() + sink.writer.Write(message) //nolint:errcheck + sink.writeL.Unlock() +} + +type prettySink struct { + writer io.Writer + minLogLevel LogLevel + writeL sync.Mutex +} + +func NewPrettySink(writer io.Writer, minLogLevel LogLevel) Sink { + return &prettySink{ + writer: writer, + minLogLevel: minLogLevel, + } +} + +func (sink *prettySink) Log(log LogFormat) { + if log.LogLevel < sink.minLogLevel { + return + } + + // Convert to json outside of critical section to minimize time spent holding lock + 
message := append(log.toPrettyJSON(), '\n') + + sink.writeL.Lock() + sink.writer.Write(message) //nolint:errcheck + sink.writeL.Unlock() +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig new file mode 100644 index 00000000..b0c95367 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes new file mode 100644 index 00000000..176a458f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore new file mode 100644 index 00000000..5e3002f8 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md new file mode 100644 index 00000000..2ce45dd4 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. 
+ +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. 
Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. 
+ +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. 
Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. + +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added 
`first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt new file mode 100644 index 00000000..f311b1ea --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-task/slim-sprig/v3/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md new file mode 100644 index 00000000..b5ab5642 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. +The reason for this is to make this library more lightweight. Most of these +functions (specially crypto ones) are not needed on most apps, but costs a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. 
+tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml new file mode 100644 index 00000000..8e6346bb --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '3' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . 
diff --git a/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go new file mode 100644 index 00000000..d06e516d --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go new file mode 100644 index 00000000..ed022dda --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. 
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := 
uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go new file mode 100644 index 00000000..b9f97966 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. 
+func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. 
+func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go new file mode 100644 index 00000000..77ebc61b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict 
map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go new file mode 100644 index 00000000..aabb9d44 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. 
+*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/v3/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go new file mode 100644 index 00000000..5ea74f89 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. 
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data 
Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go new file mode 100644 index 00000000..ca0fbb78 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not 
implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) 
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 
{ + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. 
+ nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go new file mode 100644 index 00000000..108d78a9 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go new file mode 100644 index 00000000..98cbb37a --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + 
} + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? 
+ return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return 
result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go new file mode 100644 index 00000000..8a65c132 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go new file mode 100644 index 00000000..fab55101 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := 
regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go new file mode 100644 index 00000000..3c62d6b6 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) 
string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v 
:= range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go new file mode 100644 index 00000000..b8e120e1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given 
dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 00000000..0b4659b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 00000000..081c86fa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. 
For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. 
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. 
It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). 
+ +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 00000000..1e91766a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: 
(*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var 
E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: 
(*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", 
+} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: 
(*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + 
+var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + 
proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + 
proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 
0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 
0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 
0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 
0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 00000000..f6502e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000..b80c8565 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + 
optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string 
customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 00000000..390d4e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field 
*google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + 
return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } 
+ return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, 
E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func 
IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, 
true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 00000000..3496dc99 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. 
-I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. 
+func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 00000000..18b2a331 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type 
FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. 
+ FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. 
+ // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) 
GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() 
[]*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + 
xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? 
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != 
nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. 
Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + 
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m 
*EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() 
string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. 
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. 
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. 
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { 
+ if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + 
return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. 
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return 
Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. 
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. 
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} 
+func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { 
+ if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return 
xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions 
proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + 
return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
+type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. 
Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. 
For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} 
+ +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil 
{ + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return 
xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), 
"google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), 
"google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 
0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 
0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 
0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 
0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 
0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 
0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 
0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 00000000..165b2110 --- /dev/null +++ 
b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s 
= append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, 
"XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", 
this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = 
append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if 
this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s 
= append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", 
this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, 
"Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name != nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, 
"int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", 
this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + 
return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 00000000..e0846a35 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + 
packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc 
*FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < 
len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if 
f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 00000000..fd736cb1 --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 00000000..8c8c37d2 --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 00000000..8ce9d3cf --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,596 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + 
f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) 
+ pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = b.tmpLines[:0] // Use shared space temporarily + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 
15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. +func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. + const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = 
functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + } + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + 
} + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder 
+} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b 
*buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, 
+} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 00000000..c794b939 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,274 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles. + +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true is the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). 
+ continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B]. +// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. 
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. 
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. 
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 00000000..bef1d604 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. 
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 00000000..4580bab1 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. 
+func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
+ return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. 
+ var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as both the ID of each +// location. 
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". 
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 00000000..8d07fd6c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... 
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. 
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. 
+func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. 
+ p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + 
return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. 
+ return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). 
+ if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. +func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. 
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. +func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. 
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. + return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. 
+func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. 
memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. + `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 00000000..ba4d7464 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,674 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "encoding/binary" + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. 
+func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID locationIDMap + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
+ s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. 
+ if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. 
+func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*3) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. 
+ mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. 
+ } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + Column: src.Column, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. 
func combineHeaders(srcs []*Profile) (*Profile, error) {
	// Every profile must be pairwise compatible with the first one.
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var docURL string
	var defaultSampleType string
	for _, s := range srcs {
		// Earliest non-zero start time wins; durations are summed; the
		// largest period is kept.
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		// Comments are deduplicated but keep their first-seen order.
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		// First non-empty default sample type / doc URL wins.
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
		if docURL == "" {
			docURL = s.DocURL
		}
	}

	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
		DocURL:            docURL,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}

// compatible determines if two profiles can be compared/merged.
// returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
	if !equalValueType(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}

	// Sample types must match pairwise, in the same order.
	if len(p.SampleType) != len(pb.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
	}

	for i := range p.SampleType {
		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
		}
	}
	return nil
}

// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
	return st1.Type == st2.Type && st1.Unit == st2.Unit
}

// locationIDMap is like a map[uint64]*Location, but provides efficiency for
// ids that are densely numbered, which is often the case.
type locationIDMap struct {
	dense  []*Location          // indexed by id for id < len(dense)
	sparse map[uint64]*Location // indexed by id for id >= len(dense)
}

// makeLocationIDMap creates a map whose dense array covers ids 0..n-1.
func makeLocationIDMap(n int) locationIDMap {
	return locationIDMap{
		dense:  make([]*Location, n),
		sparse: map[uint64]*Location{},
	}
}

// get returns the location stored under id, or nil if absent.
func (lm locationIDMap) get(id uint64) *Location {
	if id < uint64(len(lm.dense)) {
		return lm.dense[int(id)]
	}
	return lm.sparse[id]
}

// set records loc under id, using the dense array when id is in range.
func (lm locationIDMap) set(id uint64, loc *Location) {
	if id < uint64(len(lm.dense)) {
		lm.dense[id] = loc
		return
	}
	lm.sparse[id] = loc
}

// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
// keeps sample types that appear in all profiles only and drops/reorders the
// sample types as necessary.
//
// If the sample type order is not the same across the given profiles, the
// order is derived from the first profile.
//
// Profiles are modified in-place.
//
// It returns an error if the sample type's intersection is empty.
func CompatibilizeSampleTypes(ps []*Profile) error {
	sTypes := commonSampleTypes(ps)
	if len(sTypes) == 0 {
		return fmt.Errorf("profiles have empty common sample type list")
	}
	for _, p := range ps {
		if err := compatibilizeSampleTypes(p, sTypes); err != nil {
			return err
		}
	}
	return nil
}

// commonSampleTypes returns sample types that appear in all profiles, in the
// order in which they appear in the first profile.
+func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 00000000..f47a2439 --- 
/dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,869 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + DocURL string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. + encodeMu sync.Mutex + + commentX []int64 + docURLX int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + // Label is a per-label-key map to values for string labels. 
+ // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. + NumLabel map[string][]int64 + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. 
+ KernelRelocationSymbol string +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + Column int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + 
parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. +func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. 
+ p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. +func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. 
Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return 
fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber || !columnnumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. 
+// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
func (p *Profile) String() string {
	ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
	for _, c := range p.Comments {
		ss = append(ss, "Comment: "+c)
	}
	if url := p.DocURL; url != "" {
		ss = append(ss, fmt.Sprintf("Doc: %s", url))
	}
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
	if p.TimeNanos != 0 {
		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
	}
	if p.DurationNanos != 0 {
		ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
	}

	// Header line listing each sample type, flagging the default one.
	ss = append(ss, "Samples:")
	var sh1 string
	for _, s := range p.SampleType {
		dflt := ""
		if s.Type == p.DefaultSampleType {
			dflt = "[dflt]"
		}
		sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
	}
	ss = append(ss, strings.TrimSpace(sh1))
	for _, s := range p.Sample {
		ss = append(ss, s.string())
	}

	ss = append(ss, "Locations")
	for _, l := range p.Location {
		ss = append(ss, l.string())
	}

	ss = append(ss, "Mappings")
	for _, m := range p.Mapping {
		ss = append(ss, m.string())
	}

	return strings.Join(ss, "\n") + "\n"
}

// string dumps a text representation of a mapping. Intended mainly
// for debugging purposes.
func (m *Mapping) string() string {
	// Flag string summarizing which symbol information the mapping carries.
	bits := ""
	if m.HasFunctions {
		bits = bits + "[FN]"
	}
	if m.HasFilenames {
		bits = bits + "[FL]"
	}
	if m.HasLineNumbers {
		bits = bits + "[LN]"
	}
	if m.HasInlineFrames {
		bits = bits + "[IN]"
	}
	return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
		m.ID,
		m.Start, m.Limit, m.Offset,
		m.File,
		m.BuildID,
		bits)
}

// string dumps a text representation of a location. Intended mainly
// for debugging purposes.
func (l *Location) string() string {
	ss := []string{}
	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
	if m := l.Mapping; m != nil {
		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
	}
	if l.IsFolded {
		locStr = locStr + "[F] "
	}
	if len(l.Line) == 0 {
		ss = append(ss, locStr)
	}
	for li := range l.Line {
		lnStr := "??"
		if fn := l.Line[li].Function; fn != nil {
			lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
				fn.Name,
				fn.Filename,
				l.Line[li].Line,
				l.Line[li].Column,
				fn.StartLine)
			if fn.Name != fn.SystemName {
				lnStr = lnStr + "(" + fn.SystemName + ")"
			}
		}
		ss = append(ss, locStr+lnStr)
		// Do not print location details past the first line
		// (the spacer indents continuation lines under the ID/address
		// header; assumes upstream's 13-space literal — the source here is
		// whitespace-mangled, so confirm against upstream before merging).
		locStr = "             "
	}
	return strings.Join(ss, "\n")
}

// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
	ss := []string{}
	var sv string
	// One line of values followed by the referenced location IDs.
	for _, v := range s.Value {
		sv = fmt.Sprintf("%s %10d", sv, v)
	}
	sv = sv + ": "
	for _, l := range s.Location {
		sv = sv + fmt.Sprintf("%d ", l.ID)
	}
	ss = append(ss, sv)
	// Indent label lines under the value line. NOTE(review): width assumed
	// from upstream (16 spaces); source is whitespace-mangled — confirm.
	const labelHeader = "                "
	if len(s.Label) > 0 {
		ss = append(ss, labelHeader+labelsToString(s.Label))
	}
	if len(s.NumLabel) > 0 {
		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
	}
	return strings.Join(ss, "\n")
}

// labelsToString returns a string representation of a
// map representing labels.
func labelsToString(labels map[string][]string) string {
	ls := []string{}
	for k, v := range labels {
		ls = append(ls, fmt.Sprintf("%s:%v", k, v))
	}
	// Sort for deterministic output (map iteration order is random).
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// numLabelsToString returns a string representation of a map
// representing numeric labels.
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
	ls := []string{}
	for k, v := range numLabels {
		units := numUnits[k]
		var labelString string
		// Pair each value with its unit only when the unit slice lines up
		// one-to-one with the value slice; otherwise print raw values.
		if len(units) == len(v) {
			values := make([]string, len(v))
			for i, vv := range v {
				values[i] = fmt.Sprintf("%d %s", vv, units[i])
			}
			labelString = fmt.Sprintf("%s:%v", k, values)
		} else {
			labelString = fmt.Sprintf("%s:%v", k, v)
		}
		ls = append(ls, labelString)
	}
	// Sort for deterministic output (map iteration order is random).
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// SetLabel sets the specified key to the specified value for all samples in the
// profile.
func (p *Profile) SetLabel(key string, value []string) {
	for _, sample := range p.Sample {
		if sample.Label == nil {
			sample.Label = map[string][]string{key: value}
		} else {
			sample.Label[key] = value
		}
	}
}

// RemoveLabel removes all labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveLabel(key string) {
	for _, sample := range p.Sample {
		delete(sample.Label, key)
	}
}

// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
	for _, v := range s.Label[key] {
		if v == value {
			return true
		}
	}
	return false
}

// SetNumLabel sets the specified key to the specified value for all samples in the
// profile. "unit" is a slice that describes the units that each corresponding member
// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
// unit for a given value, that member of "unit" should be the empty string.
// "unit" must either have the same length as "value", or be nil.
func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
	// Note: the same "value"/"unit" backing slices are stored into every
	// sample (no copy), so later mutation of them is visible everywhere.
	for _, sample := range p.Sample {
		if sample.NumLabel == nil {
			sample.NumLabel = map[string][]int64{key: value}
		} else {
			sample.NumLabel[key] = value
		}
		if sample.NumUnit == nil {
			sample.NumUnit = map[string][]string{key: unit}
		} else {
			sample.NumUnit[key] = unit
		}
	}
}

// RemoveNumLabel removes all numerical labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveNumLabel(key string) {
	for _, sample := range p.Sample {
		delete(sample.NumLabel, key)
		delete(sample.NumUnit, key)
	}
}

// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
	return s.HasLabel("pprof::base", "true")
}

// Scale multiplies all sample values in a profile by a constant and keeps
// only samples that have at least one non-zero value.
func (p *Profile) Scale(ratio float64) {
	// Fast path: scaling by 1 is the identity.
	if ratio == 1 {
		return
	}
	ratios := make([]float64, len(p.SampleType))
	for i := range p.SampleType {
		ratios[i] = ratio
	}
	p.ScaleN(ratios)
}

// ScaleN multiplies each sample values in a sample by a different amount
// and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
	if len(p.SampleType) != len(ratios) {
		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
	}
	// Fast path: all ratios being 1 leaves the profile unchanged.
	allOnes := true
	for _, r := range ratios {
		if r != 1 {
			allOnes = false
			break
		}
	}
	if allOnes {
		return nil
	}
	// Scale values in place, compacting surviving samples to the front.
	fillIdx := 0
	for _, s := range p.Sample {
		keepSample := false
		for i, v := range s.Value {
			if ratios[i] != 1 {
				val := int64(math.Round(float64(v) * ratios[i]))
				s.Value[i] = val
				keepSample = keepSample || val != 0
			}
			// NOTE(review): values whose ratio is exactly 1 never set
			// keepSample, so a sample whose only non-zero entries have
			// ratio 1 gets dropped under mixed ratios — confirm against
			// upstream whether this is intended.
		}
		if keepSample {
			p.Sample[fillIdx] = s
			fillIdx++
		}
	}
	p.Sample = p.Sample[:fillIdx]
	return nil
}

// HasFunctions determines if all locations in this profile have
// symbolized function information.
func (p *Profile) HasFunctions() bool {
	for _, l := range p.Location {
		if l.Mapping != nil && !l.Mapping.HasFunctions {
			return false
		}
	}
	return true
}

// HasFileLines determines if all locations in this profile have
// symbolized file and line number information.
func (p *Profile) HasFileLines() bool {
	for _, l := range p.Location {
		if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
			return false
		}
	}
	return true
}

// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
	name := filepath.Base(m.File)
	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
}

// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 00000000..a15696ba --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 
{ + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) +} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + 
return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, u) + } + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func 
decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 00000000..b2f9fd54 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,194 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. 
+ funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. +func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. + pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. 
+ // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. 
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. + for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE new file mode 100644 index 00000000..9415ee72 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 00000000..a61021d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 00000000..778bfd7c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go new file mode 100644 index 00000000..dd1d143c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -0,0 +1,809 @@ +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +package formatter + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func newColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + 
return &writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 
0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, 
+ 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + 
} + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); 
i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + 
consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go new file mode 100644 index 
00000000..f61356db --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -0,0 +1,234 @@ +package formatter + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" +) + +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var ColorableStdErr = newColorable(os.Stderr) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...any) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...any) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...any) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) +} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + 
return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + + if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor { + colorMode = ColorModeNone + } + + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...any) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...any) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) 
+ } + + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + words := strings.Split(line, " ") + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { + wordLength := f.length(word) + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + 
return match + }) + } + + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 00000000..2b36b2fe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,80 @@ +package build + +import ( + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build ", + ShortDoc: "Build the passed in (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + internal.VerifyCLIAndFrameworkVersion(suites) + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig, true) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + var testBinPath string + if 
len(goFlagsConfig.O) != 0 { + stat, err := os.Stat(goFlagsConfig.O) + if err != nil { + panic(err) + } + if stat.IsDir() { + testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test" + } else { + testBinPath = goFlagsConfig.O + } + } + if len(testBinPath) == 0 { + testBinPath = path.Join(suite.Path, suite.PackageName+".test") + } + fmt.Printf("Compiled %s\n", testBinPath) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 00000000..f0e7331f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...any) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...any) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...any) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go 
b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 00000000..79b83a3a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,54 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + for _, arg := range args { + if len(arg) > 1 && strings.HasPrefix(arg, "-") { + AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error()) + } + } + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 00000000..c3f6d3a1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,180 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + 
"github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if 
deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, 
"{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 00000000..a367a1fc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go 
b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 00000000..b2dc59be --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig/v3" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. 
+ +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. "github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom boostrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the 
custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 00000000..cf3b7cb6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,265 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig/v3" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. 
`package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate ", + ShortDoc: "Generate a test file named _test.go", + Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go. 
+ +You can also pass a of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + BuildTags string + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + BuildTags: getBuildTags(conf.Tags), + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render bootstrap template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { 
+ name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + defer modFile.Close() + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 00000000..4dab07d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,43 @@ +package generators + +var specText = `{{.BuildTags}} +package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . 
"github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 00000000..28c7aa6f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,76 @@ +package generators + +import ( + "fmt" + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string + Tags string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name 
= strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} + +// getBuildTags returns the resultant string to be added. +// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 00000000..7bbe6be0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,173 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := 
filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan 
TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig, preserveSymbols) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 00000000..87cfa111 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. 
+ +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 
0 + if !sortFunc(i) { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 00000000..8e16d2bb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,227 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return 
filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && 
cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case 
TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) 
+ if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +// loads each profile, merges them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) + if err != nil { + return err + } + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) + } + } + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) + if err != nil { + return err + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + _ = proFile.Close() + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + 
os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 00000000..41052ea1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, 
reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. 
+ if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + 
reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, 
numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc) + memProfiles = append(memProfiles, 
procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. 
The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile 
:= AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := 
exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 00000000..df99875b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,284 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool 
+ PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) 
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + 
PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.MatchString(file.Name()) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.MatchString(file.Name()) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.MatchString(file.Name()) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 00000000..bd9ca7d5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func 
FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) { + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 00000000..9da1bab3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versiorRe.FindStringSubmatch(components[1]) + if 
matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. 
Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}")) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 00000000..6c61f09d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels ", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) 
[]string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 00000000..bd6b8fbf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + _ "go.uber.org/automaxprocs" + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + 
"github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 00000000..5d8d00bb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,301 @@ +package outline + +import ( + "go/ast" + "go/token" + "strconv" + + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type 
ginkgoMetadata struct { + // Name is the spec or container function name, e.g. `Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. 
+func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return 
&n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. +func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} + +func labelFromCallExpr(ce *ast.CallExpr) []string { + + labels := []string{} + if len(ce.Args) < 2 { + return labels + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Label" { + ls := extractLabels(expr) + labels = append(labels, ls...) 
+ } + } + } + return labels +} + +func extractLabels(expr *ast.CallExpr) []string { + out := []string{} + for _, arg := range expr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + + return out +} + +func pendingFromCallExpr(ce *ast.CallExpr) bool { + + pending := false + if len(ce.Args) < 2 { + return pending + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Pending" { + pending = true + } + case *ast.Ident: + if expr.Name == "Pending" { + pending = true + } + } + } + return pending +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 00000000..f0a6b5d2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. 
Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + name = "ginkgo" + } + if name == "." { + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 00000000..e99d557d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,130 @@ +package outline + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { 
+ if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. 
+func (o *outline) StringIndent(width int) string { + var b bytes.Buffer + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + + csvWriter := csv.NewWriter(&b) + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + var labels string + if len(n.Labels) == 1 { + labels = n.Labels[0] + } else { + labels = strings.Join(n.Labels, ", ") + } + + row := []string{ + n.Name, + n.Text, + strconv.Itoa(n.Start), + strconv.Itoa(n.End), + strconv.FormatBool(n.Spec), + strconv.FormatBool(n.Focused), + strconv.FormatBool(n.Pending), + labels, + } + csvWriter.Write(row) + + // Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation + csvWriter.Flush() + + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 00000000..36698d46 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. 
+ stdinAlias = "-" + usageCommand = "ginkgo outline " +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline ", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go 
b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index 00000000..03875b97 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run -- ", + ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler 
*interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig, false) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if 
suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = time.Until(endTime) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... 
you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 00000000..7dd29439 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := 
parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if 
c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 00000000..6c485c5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 00000000..26418ac6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + 
return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 00000000..a34d9435 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, 
maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + +func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 00000000..0e6ae1f2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,117 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) 
+ +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if isHiddenFile(info) { + continue + } + + if goTestRegExp.MatchString(info.Name()) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.MatchString(info.Name()) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || 
strings.HasPrefix(info.Name(), "_") +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 00000000..b4892beb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for 
path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 00000000..53272df7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, 
includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 00000000..fe1ca305 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,192 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch -- ", + ShortDoc: "Watch the passed in and runs their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + 
cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), 
internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig, false) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted() { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 00000000..79bfa87d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,177 @@ +package interrupt_handler + +import ( + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond + +type InterruptCause uint + +const ( + InterruptCauseInvalid 
InterruptCause = iota + InterruptCauseSignal + InterruptCauseAbortByOtherProcess +) + +type InterruptLevel uint + +const ( + InterruptLevelUninterrupted InterruptLevel = iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Channel chan any + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus +} + +type InterruptHandler struct { + c chan any + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan any + signals []os.Signal + requestAbortCheck chan any +} + +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} + } + handler := &InterruptHandler{ + c: make(chan any), + lock: &sync.Mutex{}, + stop: make(chan any), + requestAbortCheck: make(chan any), + client: client, + signals: signals, + } + handler.registerForInterrupts() + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler *InterruptHandler) registerForInterrupts() { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, handler.signals...) 
+ + // cross-process abort handling + var abortChannel chan any + if handler.client != nil { + abortChannel = make(chan any) + go func() { + pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) + for { + select { + case <-pollTicker.C: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.stop: + pollTicker.Stop() + return + } + } + }() + } + + go func(abortChannel chan any) { + var interruptCause InterruptCause + for { + select { + case <-signalChannel: + interruptCause = InterruptCauseSignal + case <-abortChannel: + interruptCause = InterruptCauseAbortByOtherProcess + case <-handler.stop: + signal.Stop(signalChannel) + return + } + abortChannel = nil + + handler.lock.Lock() + oldLevel := handler.level + handler.cause = interruptCause + if handler.level == InterruptLevelUninterrupted { + handler.level = InterruptLevelCleanupAndReport + } else if handler.level == InterruptLevelCleanupAndReport { + handler.level = InterruptLevelReportOnly + } else if handler.level == InterruptLevelReportOnly { + handler.level = InterruptLevelBailOut + } + if handler.level != oldLevel { + close(handler.c) + handler.c = make(chan any) + } + handler.lock.Unlock() + } + }(abortChannel) +} + +func (handler *InterruptHandler) Status() InterruptStatus { + handler.lock.Lock() + status := InterruptStatus{ + Level: handler.level, + Channel: handler.c, + Cause: handler.cause, + } + handler.lock.Unlock() + + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() + } + + return status +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 
00000000..bf0de496 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupt_handler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + signal.Notify(c, syscall.SIGQUIT) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 00000000..fcf8da83 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package interrupt_handler + +func SwallowSigQuit() { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 00000000..4234d802 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,72 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan any + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + 
PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 00000000..4aa10ae4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,166 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + return &httpClient{ + serverHost: serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return false + } + resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func (client *httpClient) 
Close() error { + return nil +} + +func (client *httpClient) post(path string, data any) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data any) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := 
client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, error) { + var counter ParallelIndexCounter + err := client.poll("/counter", &counter) + return counter.Index, err +} + +func (client *httpClient) PostAbort() error { + return client.post("/abort", nil) +} + +func (client *httpClient) ShouldAbort() bool { + err := client.poll("/abort", nil) + return err == ErrorGone +} + +func (client *httpClient) Write(p []byte) (int, error) { + resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("failed to emit output") + } + return len(p), err +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go new file mode 100644 index 00000000..8a1b7a5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -0,0 +1,242 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "encoding/json" + "io" + "net" + "net/http" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type httpServer struct { + listener net.Listener + handler *ServerHandler +} + +// Create a new server, automatically selecting a port +func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &httpServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +// Start the server. 
You don't need to `go s.Start()`, just `s.Start()` +func (server *httpServer) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) + mux.HandleFunc("/did-run", server.didRun) + mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) + mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) + + //synchronization endpoints + mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) + mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) + mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) + mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) + mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) + mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/up", server.handleUp) + mux.HandleFunc("/abort", server.handleAbort) + + go httpServer.Serve(server.listener) +} + +// Stop the server +func (server *httpServer) Close() { + server.listener.Close() +} + +// The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan any { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report 
types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, &beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) 
handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 00000000..bb4675a0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,136 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var 
err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data any) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) +} + +func (client *rpcClient) Write(p []byte) (int, error) { + var n int + err := client.client.Call("Server.EmitOutput", p, &n) + return n, err +} + +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState 
:= BeforeSuiteState{ + State: state, + Data: data, + } + return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) +} + +func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) +} + +func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *rpcClient) FetchNextCounter() (int, error) { + var counter int + err := client.client.Call("Server.Counter", voidSender, &counter) + return counter, err +} + +func (client *rpcClient) PostAbort() error { + return client.client.Call("Server.Abort", voidSender, voidReceiver) +} + +func (client *rpcClient) ShouldAbort() bool { + var shouldAbort bool + client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) + return shouldAbort +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go new file mode 100644 index 00000000..1574f99a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -0,0 +1,75 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. 
+This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "io" + "net" + "net/http" + "net/rpc" + + "github.com/onsi/ginkgo/v2/reporters" +) + +/* +RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type RPCServer struct { + listener net.Listener + handler *ServerHandler +} + +// Create a new server, automatically selecting a port +func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &RPCServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +// Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *RPCServer) Start() { + rpcServer := rpc.NewServer() + rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server + + httpServer := &http.Server{} + httpServer.Handler = rpcServer + + go httpServer.Serve(server.listener) +} + +// Stop the server +func (server *RPCServer) Close() { + server.listener.Close() +} + +// The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *RPCServer) Address() string { + return server.listener.Addr().String() +} + +func (server *RPCServer) GetSuiteDone() chan any { + return server.handler.done +} + +func (server *RPCServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *RPCServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *RPCServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go new file mode 100644 index 00000000..ab9e1137 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -0,0 +1,234 @@ +package parallel_support + +import ( + "io" + "os" + "sync" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Void struct{} + +var voidReceiver *Void = &Void{} +var voidSender Void + +// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. 
+// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan any + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan any), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + 
handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { + var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + +func (handler *ServerHandler) 
BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 00000000..74ad0768 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,788 @@ +/* 
+Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. + +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + "sync" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastCharWasNewline bool + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastCharWasNewline: true, + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running 
Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + 
r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + v := r.conf.Verbosity() + if v.LT(types.VerbosityLevelVerbose) || 
report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} + +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + + //should we completely omit this spec? + if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + + header := r.specDenoter + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } + } + + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? 
+ includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? + showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) + + switch report.State { + case types.SpecStatePassed: + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("%s PASSED", header) + } + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true + } + case types.SpecStatePending: + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true + } + case types.SpecStateSkipped: + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) + } + } + + // If we have no content to show, just emit the header and return + if !reportHasContent { + r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } + return + } + + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } + + //Emit Stdout/Stderr Output + if showSeparateStdSection { + r.wrapTextBlock("Captured 
StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) + } + + if showSeparateVisibilityAlwaysReportsSection { + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) + } + + if showTimeline { + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) + } + + // Emit Failure Message + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { + r.emitBlock("\n") + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}")) + } + } + + r.emitDelimiter(0) +} + +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" + } +} + +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) +} + +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) 
+ cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) + } + } + } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} + +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) + } +} + +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) + r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, 
failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } + if failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) + } + + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } + + if !failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) + } + + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter(1) + + if report.RunningInParallel { + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 + } + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, 
report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range report.AdditionalReports { + r.emit(r.fi(indent+1, 
additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) + } +} + +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return + } + r.emitReportEntry(1, entry) +} + +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s 
{{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. 
Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + r._emit(s, false, false) +} + +func (r *DefaultReporter) emitBlock(s string) { + r._emit(s, true, false) +} + +func (r *DefaultReporter) emitDelimiter(indent uint) { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) +} + +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { + return + } + r.lock.Lock() + defer r.lock.Unlock() + if 
isDelimiter && r.lastEmissionWasDelimiter { + return + } + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...any) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string { + return r.formatter.Fi(indentation, format, args...) +} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) + } else { + texts = append(texts, r.f(report.LeafNodeText)) + } + labels = append(labels, report.LeafNodeLabels) + locations = append(locations, report.LeafNodeLocation) + + failureLocation := report.Failure.FailureNodeLocation + if usePreciseFailureLocation { + failureLocation = report.Failure.Location + } + + highlightIndex := -1 + switch report.Failure.FailureNodeContext { + case types.FailureNodeAtTopLevel: + texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) + locations = append([]types.CodeLocation{failureLocation}, locations...) 
+ labels = append([][]string{{}}, labels...) + highlightIndex = 0 + case types.FailureNodeInContainer: + i := report.Failure.FailureNodeContainerIndex + texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType) + locations[i] = failureLocation + highlightIndex = i + case types.FailureNodeIsLeafNode: + i := len(texts) - 1 + texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText) + locations[i] = failureLocation + highlightIndex = i + default: + //there is no failure, so we highlight the leaf ndoe + highlightIndex = len(texts) - 1 + } + + out := "" + if veryVerbose { + for i := range texts { + if i == highlightIndex { + out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i]) + } else { + out += r.fi(uint(i), "%s", texts[i]) + } + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } else { + for i := range texts { + style := "{{/}}" + if i%2 == 1 { + style = "{{gray}}" + } + if i == highlightIndex { + style = highlightColor + "{{bold}}" + } + out += r.f(style+"%s", texts[i]) + if i < len(texts)-1 { + out += " " + } else { + out += r.f("{{/}}") + } + } + flattenedLabels := report.Labels() + if len(flattenedLabels) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) + } + out += "\n" + if usePreciseFailureLocation { + out += r.f("{{gray}}%s{{/}}", failureLocation) + } else { + leafLocation := locations[len(locations)-1] + if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) { + out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation) + out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation) + } else { + out += r.f("{{gray}}%s{{/}}", leafLocation) + } + } + + } + return out +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go new file mode 100644 index 00000000..613072eb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -0,0 +1,149 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters +// this has been removed in V2. +// Please read the documentation at: +// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +// for Ginkgo's new behavior and for a migration path. +type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. 
+func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: false, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, 
componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) + componentTexts = append(componentTexts, spec.LeafNodeText) + componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation) + + specSummary := &types.DeprecatedSpecSummary{ + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + NumberOfSamples: spec.NumAttempts, + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.SpecWillRun(specSummary) + reporter.SpecDidComplete(specSummary) + + switch spec.State { + case types.SpecStatePending: + summary.NumberOfPendingSpecs += 1 + case types.SpecStateSkipped: + summary.NumberOfSkippedSpecs += 1 + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted: + summary.NumberOfFailedSpecs += 1 + case types.SpecStatePassed: + summary.NumberOfPassedSpecs += 1 + if spec.NumAttempts > 1 { + summary.NumberOfFlakedSpecs += 1 + } + } + } + } + + summary.SuiteSucceeded = report.SuiteSucceeded + summary.RunTime = report.RunTime + + reporter.SuiteDidEnd(summary) +} + +func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure { + if spec.Failure.IsZero() { + return types.DeprecatedSpecFailure{} + } + + index := 0 + switch spec.Failure.FailureNodeContext { + case types.FailureNodeInContainer: + index = spec.Failure.FailureNodeContainerIndex + case types.FailureNodeAtTopLevel: + index = -1 + case types.FailureNodeIsLeafNode: + index = len(spec.ContainerHierarchyTexts) - 1 + if spec.LeafNodeText != "" { + index += 1 + } + } + + return types.DeprecatedSpecFailure{ + Message: spec.Failure.Message, + Location: spec.Failure.Location, + ForwardedPanic: spec.Failure.ForwardedPanic, + ComponentIndex: index, + ComponentType: 
spec.Failure.FailureNodeType, + ComponentCodeLocation: spec.Failure.FailureNodeLocation, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go new file mode 100644 index 00000000..5d3e8db9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -0,0 +1,69 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/types" +) + +// GenerateJSONReport produces a JSON-formatted report at the passed in destination +func GenerateJSONReport(report types.Report, destination string) error { + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } + f, err := os.Create(destination) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode([]types.Report{ + report, + }) + if err != nil { + return err + } + return nil +} + +// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to decode but reports on them via the returned messages []string +func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + allReports := []types.Report{} + for _, source := range sources { + reports := []types.Report{} + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = json.Unmarshal(data, &reports) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + allReports = append(allReports, reports...) 
+ } + + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } + f, err := os.Create(destination) + if err != nil { + return messages, err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(allReports) + if err != nil { + return messages, err + } + return messages, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go new file mode 100644 index 00000000..562e0f62 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -0,0 +1,390 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/ + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "os" + "path" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +type JunitReportConfig struct { + // Spec States for which no timeline should be emitted for system-err + // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs + OmitTimelinesForSpecState types.SpecState + + // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags + OmitFailureMessageAttr bool + + //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out + OmitCapturedStdOutErr bool + + // Enable OmitSpecLabels to prevent labels from appearing in the spec name + OmitSpecLabels bool + + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name + OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool +} + +type JUnitTestSuites struct { + XMLName xml.Name 
`xml:"testsuites"` + // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending and/or skipped + Disabled int `xml:"disabled,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all test suites + Time float64 `xml:"time,attr"` + + //The set of all test suites + TestSuites []JUnitTestSuite `xml:"testsuite"` +} + +type JUnitTestSuite struct { + // Name maps onto the description of the test suite - maps onto Report.SuiteDescription + Name string `xml:"name,attr"` + // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath + Package string `xml:"package,attr"` + // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending + Disabled int `xml:"disabled,attr"` + // Skiped maps onto specs that are skipped + Skipped int `xml:"skipped,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime + Time float64 `xml:"time,attr"` + // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime + Timestamp string `xml:"timestamp,attr"` + + //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs + Properties JUnitProperties `xml:"properties"` + + //TestCases capture the individual specs + TestCases []JUnitTestCase `xml:"testcase"` +} + +type JUnitProperties struct { + Properties []JUnitProperty `xml:"property"` +} + +func 
(jup JUnitProperties) WithName(name string) string { + for _, property := range jup.Properties { + if property.Name == name { + return property.Value + } + } + return "" +} + +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + +type JUnitTestCase struct { + // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" + Name string `xml:"name,attr"` + // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription + Classname string `xml:"classname,attr"` + // Status maps onto the string representation of SpecReport.State + Status string `xml:"status,attr"` + // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime + Time float64 `xml:"time,attr"` + // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. 
+ Owner string `xml:"owner,attr,omitempty"` + //Skipped is populated with a message if the test was skipped or pending + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + //Error is populated if the test panicked or was interrupted + Error *JUnitError `xml:"error,omitempty"` + //Failure is populated if the test failed + Failure *JUnitFailure `xml:"failure,omitempty"` + //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr + SystemOut string `xml:"system-out,omitempty"` + //SystemOut maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput + SystemErr string `xml:"system-err,omitempty"` +} + +type JUnitSkipped struct { + // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) + Message string `xml:"message,attr"` +} + +type JUnitError struct { + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" + Message string `xml:"message,attr"` + //Type is one of "panicked" or "interrupted" + Type string `xml:"type,attr"` + //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines + Description string `xml:",chardata"` +} + +type JUnitFailure struct { + //Message maps onto the failure message - equivalent to SpecReport.Failure.Message + Message string `xml:"message,attr"` + //Type is "failed" + Type string `xml:"type,attr"` + //Description maps onto the location and stack trace of the failure + Description string `xml:",chardata"` +} + +func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { + suite := JUnitTestSuite{ + Name: report.SuiteDescription, + 
Package: report.SuitePath, + Time: report.RunTime.Seconds(), + Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), + Properties: JUnitProperties{ + Properties: []JUnitProperty{ + {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, + {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, + {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, + {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, + {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, + {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, + {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, + {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, + {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, + {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, + {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, + {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, + {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, + {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, + {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, + }, + }, + } + for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 && !config.OmitSpecLabels { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + owner := "" + for _, label := range labels { + if matches := 
ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } + name = strings.TrimSpace(name) + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + Owner: owner, + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: spec.Failure.Message, + Type: "interrupted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = 
&JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } + f, err := os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
+ } + + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n") + } + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + return RenderTimeline(spec, true) +} + +func RenderTimeline(spec types.SpecReport, noColor bool) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + return spec.CapturedStdOutErr +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 
100644 index 00000000..5e726c46 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,29 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) + EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 00000000..e990ad82 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,105 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = 
strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } + f, err := os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case 
types.SpecStateInterrupted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateAborted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + merged = append(merged, data...) 
+ } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 00000000..57e87517 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,159 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" + "sync" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +type codeLocationLocator struct { + pcs map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if 
!c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var clLocator = &codeLocationLocator{ + pcs: map[uintptr]bool{}, + helpers: map[string]bool{}, + lock: &sync.Mutex{}, +} + +// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. +func MarkAsHelper(optionalSkip ...int) { + skip := 1 + if len(optionalSkip) > 0 { + skip += optionalSkip[0] + } + pc, _, _, ok := runtime.Caller(skip) + if ok { + clLocator.addHelper(pc) + } +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func NewCodeLocation(skip int) CodeLocation { + return clLocator.getCodeLocation(skip + 1) +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + cl := clLocator.getCodeLocation(skip + 1) + cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) + return cl +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. 
+ stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.MatchString(stack[i*2+1]) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 00000000..2e827efe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,804 @@ +/* +Ginkgo accepts a number of configuration options. +These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailOnEmpty bool + FailFast bool + FlakeAttempts int + MustPassRepeatedly int + DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration + Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually + OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + 
Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + GracePeriod: 30 * time.Second, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool + GithubOutput bool + SilenceSkips bool + ForceNewlines bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose { + return VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{} +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return 
CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.GOMAXPROCS(-1) + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.GOMAXPROCS(-1) +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). +// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + BuildVCS bool + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool + O string +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +func (g GoFlagsConfig) NeedsSymbols() bool { + return g.BinaryMustBePreserved() +} + +// Configuration that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + 
NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis", + Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis", + Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. 
To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. 
If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. 
The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. 
Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter. 
You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: 
"D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) 
+ flags = flags.WithPrefix("ginkgo") + bindings := map[string]any{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if 
suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. 
If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. 
The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. 
If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. 
See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. 
See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. 
Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. 
By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. 
Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile)) + } + if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile)) + } + if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile)) + } + if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile)) + } + if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile)) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := 
os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) { + // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + + if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols { + goFlagsConfig.LDFlags = "-w -s" + } + + args := []string{"test", "-c", packageToBuild} + goArgs, err := GenerateFlagArgs( + GoBuildFlags, + map[string]any{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) 
+ return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]any{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]any{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) 
+ + bindings := map[string]any{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]any{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
+ + bindings := map[string]any{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]any{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 00000000..518989a8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. 
+*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + +func (s DeprecatedSpecSummary) Panicked() bool { + 
return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info any + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go new file mode 100644 index 00000000..e2519f67 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -0,0 +1,177 @@ +package types + +import ( + "os" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type Deprecation struct { + Message string + DocLink string + Version string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "Support for custom reporters has been removed in V2. 
Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", + DocLink: "removed-custom-reporters", + Version: "1.16.0", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + Version: "1.16.0", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", + DocLink: "removed-measure", + Version: "1.16.3", + } +} + +func (d deprecations) ParallelNode() Deprecation { + return Deprecation{ + Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", + DocLink: "renamed-ginkgoparallelnode", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + Version: "1.16.0", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.", + Version: "1.16.0", + } +} + +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. 
Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") + if deprecation.Version != "" && ackVersion != "" { + ack := ParseSemVer(ackVersion) + version := ParseSemVer(deprecation.Version) + if ack.GreaterThanOrEqualTo(version) { + return + } + } + + d.lock.Lock() + defer d.lock.Unlock() + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} 
{{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) + return out +} + +type SemVer struct { + Major int + Minor int + Patch int +} + +func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { + return (s.Major > o.Major) || + (s.Major == o.Major && s.Minor > o.Minor) || + (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) +} + +func ParseSemVer(semver string) SemVer { + out := SemVer{} + semver = strings.TrimFunc(semver, func(r rune) bool { + return !(unicode.IsNumber(r) || r == '.') + }) + components := strings.Split(semver, ".") + if len(components) > 0 { + out.Major, _ = strconv.Atoi(components[0]) + } + if len(components) > 1 { + out.Minor, _ = strconv.Atoi(components[1]) + } + if len(components) > 2 { + out.Patch, _ = strconv.Atoi(components[2]) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 00000000..1d96ae02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import "encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b 
[]byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 00000000..c2796b54 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,653 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. 
+To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. + +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. 
{{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. 
+ +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. + +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. 
At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed `+mustGet+`. 
+You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. 
+You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. 
You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { + return GinkgoError{ + Heading: "ContinueOnFailure not decorating an outermost Ordered Container", + Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. 
The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. 
Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. 
regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. 
This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "No parameters have been passed to the Table Function", + Message: "The Table Function expected at least 1 parameter", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type 
passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { + return GinkgoError{ + Heading: "Contexts cannot be used in subtree tables", + Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. 
This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." + +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun 
again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration. The default is 30s.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... 
which would you like?", + } +} + +func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error { + return GinkgoError{ + Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path), + Message: fmt.Sprintf("%s takes a filename, not a path. Use --output-dir to specify a directory to collect all test outputs.", flag), + } +} + +func (g ginkgoErrors) FlagAfterPositionalParameter() error { + return GinkgoError{ + Heading: "Malformed arguments - detected a flag after the package liste", + Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... -vet=\"\"' is not{{/}}", + } +} + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 00000000..cc21df71 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + 
components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations []CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 00000000..8409653f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,490 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + 
"github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string + AlwaysExport bool +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." + flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings any + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + 
bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //suppress all output as Ginkgo is responsible for formatting usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //on user feedback + f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr := value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): 
+ if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + 
f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out += 
f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } + } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + 
if len(flag.Name) == 1 { + hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
+func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 00000000..40a909b6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,583 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func 
matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels 
[]string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + case lfTokenSetOperation: + return 4 + } + return -1 +} + +func (l lfToken) String() string { + switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" 
+ case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) 
+ } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | 
" + tn.value + } + out += ">" + return out +} + +func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + +func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.ContainsRune(cutset, runes[j]) { + break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) && runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, 
"Missing closing '}' in set operation argument?") + } + i += n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + + switch runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/:") + i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? 
+ if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + if input == "" { + return func(_ []string) bool { return true }, nil + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, 
"Found two adjacent labels. You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if 
DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 00000000..63f7a9f6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,190 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel +type ReportEntryValue struct { + raw any //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value any) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() any { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the 
representation at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + return json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Location captures the location of the AddReportEntry call + Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. 
+type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() any { + return entry.Value.GetRawValue() +} + +func (entry ReportEntry) GetTimelineLocation() TimelineLocation { + return entry.TimelineLocation +} + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) 
{ + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 00000000..ddcbec1b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,922 @@ +package types + +import ( + "encoding/json" + "fmt" + "os" + "sort" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 + +var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +func init() { + if os.Getenv("GINKGO_TIME_FORMAT") != "" { + GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT") + } +} + +// Report captures information about a Ginkgo test run +type Report 
struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsuccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused + //(i.e an `FIt` or an `FDescribe` + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For example, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. 
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite + SpecReports SpecReports +} + +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +// to form a complete final report. 
+func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + copy(reports, report.SpecReports) + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. 
+ ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc. + State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. + // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + // Repeated specs can be retried with the use of the MustPassRepeatedly decorator + NumAttempts int + + // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. 
+ MaxFlakeAttempts int + + // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator + MaxMustPassRepeatedly int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport + + // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. 
+ AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + if 
len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + if len(report.AdditionalFailures) > 0 { + out.AdditionalFailures = report.AdditionalFailures + } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. +func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +// Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels. 
+func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +// FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +// LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports 
{ + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + +type SpecReports []SpecReport + +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + +// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts. 
+func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts +func (reports SpecReports) CountOfRepeatedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// TimelineLocation captures the location of an event in the spec's timeline +type TimelineLocation struct { + //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream + Offset int `json:",omitempty"` + + //Order is the order of the event with respect to other events. The absolute value of Order + //is irrelevant. All that matters is that an event with a lower Order occurs before ane vent with a higher Order + Order int `json:",omitempty"` + + Time time.Time +} + +// TimelineEvent represent an event on the timeline +// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it +type TimelineEvent interface { + GetTimelineLocation() TimelineLocation +} + +type Timeline []TimelineEvent + +func (t Timeline) Len() int { return len(t) } +func (t Timeline) Less(i, j int) bool { + return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order +} +func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t Timeline) WithoutHiddenReportEntries() Timeline { + out := Timeline{} + for _, event := range t { + if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever { + continue + } + out = append(out, event) + } + return out +} + +func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline { + out := Timeline{} + for _, event := range t { + 
if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() { + continue + } + out = append(out, event) + } + return out +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if the user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + TimelineLocation TimelineLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. 
+ // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` +} + +func (f Failure) IsZero() bool { + return f.Message == "" && (f.Location == CodeLocation{}) +} + +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return 
fncEnumSupport.MarshJSON(uint(fnc)) +} + +// AdditionalFailure capturs any additional failures that occur after the initial failure of a psec +// these typically occur in clean up nodes after the spec has failed. +// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted + SpecStateTimedout +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// ProgressReport captures the progress of the current spec. 
It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` + + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` + + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` + + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` + + AdditionalReports []string `json:",omitempty"` + + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` + + Goroutines []Goroutine `json:",omitempty"` +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + out := pr + out.CapturedGinkgoWriterOutput = "" + return 
out +} + +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportBeforeSuite + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | 
NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "DeferCleanup", + uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} + +/* +SpecEvent captures a vareity of events that can occur when specs run. See SpecEventType for the list of available events. 
+*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", 
+}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 00000000..158ac2fd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.23.4" diff --git a/vendor/github.com/openzipkin/zipkin-go/LICENSE b/vendor/github.com/openzipkin/zipkin-go/LICENSE new file mode 100644 index 00000000..2ff72246 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. 
For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. 
If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. 
You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ +Copyright 2017 The OpenZipkin Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go new file mode 100644 index 00000000..0cb5a96f --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go @@ -0,0 +1,130 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package idgenerator contains several Span and Trace ID generators which can be +used by the Zipkin tracer. Additional third party generators can be plugged in +if they adhere to the IDGenerator interface. +*/ +package idgenerator + +import ( + "math/rand" + "sync" + "time" + + "github.com/openzipkin/zipkin-go/model" +) + +var ( + seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) + // NewSource returns a new pseudo-random Source seeded with the given value. 
+ // Unlike the default Source used by top-level functions, this source is not + // safe for concurrent use by multiple goroutines. Hence the need for a mutex. + seededIDLock sync.Mutex +) + +// IDGenerator interface can be used to provide the Zipkin Tracer with custom +// implementations to generate Span and Trace IDs. +type IDGenerator interface { + SpanID(traceID model.TraceID) model.ID // Generates a new Span ID + TraceID() model.TraceID // Generates a new Trace ID +} + +// NewRandom64 returns an ID Generator which can generate 64 bit trace and span +// id's +func NewRandom64() IDGenerator { + return &randomID64{} +} + +// NewRandom128 returns an ID Generator which can generate 128 bit trace and 64 +// bit span id's +func NewRandom128() IDGenerator { + return &randomID128{} +} + +// NewRandomTimestamped generates 128 bit time sortable traceid's and 64 bit +// spanid's. +func NewRandomTimestamped() IDGenerator { + return &randomTimestamped{} +} + +// randomID64 can generate 64 bit traceid's and 64 bit spanid's. +type randomID64 struct{} + +func (r *randomID64) TraceID() (id model.TraceID) { + seededIDLock.Lock() + id = model.TraceID{ + Low: uint64(seededIDGen.Int63()), + } + seededIDLock.Unlock() + return +} + +func (r *randomID64) SpanID(traceID model.TraceID) (id model.ID) { + if !traceID.Empty() { + return model.ID(traceID.Low) + } + seededIDLock.Lock() + id = model.ID(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} + +// randomID128 can generate 128 bit traceid's and 64 bit spanid's. 
+type randomID128 struct{} + +func (r *randomID128) TraceID() (id model.TraceID) { + seededIDLock.Lock() + id = model.TraceID{ + High: uint64(seededIDGen.Int63()), + Low: uint64(seededIDGen.Int63()), + } + seededIDLock.Unlock() + return +} + +func (r *randomID128) SpanID(traceID model.TraceID) (id model.ID) { + if !traceID.Empty() { + return model.ID(traceID.Low) + } + seededIDLock.Lock() + id = model.ID(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} + +// randomTimestamped can generate 128 bit time sortable traceid's compatible +// with AWS X-Ray and 64 bit spanid's. +type randomTimestamped struct{} + +func (t *randomTimestamped) TraceID() (id model.TraceID) { + seededIDLock.Lock() + id = model.TraceID{ + High: uint64(time.Now().Unix()<<32) + uint64(seededIDGen.Int31()), + Low: uint64(seededIDGen.Int63()), + } + seededIDLock.Unlock() + return +} + +func (t *randomTimestamped) SpanID(traceID model.TraceID) (id model.ID) { + if !traceID.Empty() { + return model.ID(traceID.Low) + } + seededIDLock.Lock() + id = model.ID(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go new file mode 100644 index 00000000..02d09fb1 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go @@ -0,0 +1,60 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "errors" + "time" +) + +// ErrValidTimestampRequired error +var ErrValidTimestampRequired = errors.New("valid annotation timestamp required") + +// Annotation associates an event that explains latency with a timestamp. +type Annotation struct { + Timestamp time.Time + Value string +} + +// MarshalJSON implements custom JSON encoding +func (a *Annotation) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Timestamp int64 `json:"timestamp"` + Value string `json:"value"` + }{ + Timestamp: a.Timestamp.Round(time.Microsecond).UnixNano() / 1e3, + Value: a.Value, + }) +} + +// UnmarshalJSON implements custom JSON decoding +func (a *Annotation) UnmarshalJSON(b []byte) error { + type Alias Annotation + annotation := &struct { + TimeStamp uint64 `json:"timestamp"` + *Alias + }{ + Alias: (*Alias)(a), + } + if err := json.Unmarshal(b, &annotation); err != nil { + return err + } + if annotation.TimeStamp < 1 { + return ErrValidTimestampRequired + } + a.Timestamp = time.Unix(0, int64(annotation.TimeStamp)*1e3) + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/doc.go b/vendor/github.com/openzipkin/zipkin-go/model/doc.go new file mode 100644 index 00000000..4cae4e07 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/doc.go @@ -0,0 +1,23 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* +Package model contains the Zipkin V2 model which is used by the Zipkin Go +tracer implementation. + +Third party instrumentation libraries can use the model and transport packages +found in this Zipkin Go library to directly interface with the Zipkin Server or +Zipkin Collectors without the need to use the tracer implementation itself. +*/ +package model diff --git a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go new file mode 100644 index 00000000..48e2afd6 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go @@ -0,0 +1,50 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "net" + "strings" +) + +// Endpoint holds the network context of a node in the service graph. +type Endpoint struct { + ServiceName string + IPv4 net.IP + IPv6 net.IP + Port uint16 +} + +// MarshalJSON exports our Endpoint into the correct format for the Zipkin V2 API. +func (e Endpoint) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + ServiceName string `json:"serviceName,omitempty"` + IPv4 net.IP `json:"ipv4,omitempty"` + IPv6 net.IP `json:"ipv6,omitempty"` + Port uint16 `json:"port,omitempty"` + }{ + strings.ToLower(e.ServiceName), + e.IPv4, + e.IPv6, + e.Port, + }) +} + +// Empty returns if all Endpoint properties are empty / unspecified. 
+func (e *Endpoint) Empty() bool { + return e == nil || + (e.ServiceName == "" && e.Port == 0 && len(e.IPv4) == 0 && len(e.IPv6) == 0) +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/kind.go b/vendor/github.com/openzipkin/zipkin-go/model/kind.go new file mode 100644 index 00000000..d247c020 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/kind.go @@ -0,0 +1,27 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Kind clarifies context of timestamp, duration and remoteEndpoint in a span. +type Kind string + +// Available Kind values +const ( + Undetermined Kind = "" + Client Kind = "CLIENT" + Server Kind = "SERVER" + Producer Kind = "PRODUCER" + Consumer Kind = "CONSUMER" +) diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span.go b/vendor/github.com/openzipkin/zipkin-go/model/span.go new file mode 100644 index 00000000..cf30bfac --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/span.go @@ -0,0 +1,161 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "strings" + "time" +) + +// unmarshal errors +var ( + ErrValidTraceIDRequired = errors.New("valid traceId required") + ErrValidIDRequired = errors.New("valid span id required") + ErrValidDurationRequired = errors.New("valid duration required") +) + +// BaggageFields holds the interface for consumers needing to interact with +// the fields in application logic. +type BaggageFields interface { + // Get returns the values for a field identified by its key. + Get(key string) []string + // Add adds the provided values to a header designated by key. If not + // accepted by the baggage implementation, it will return false. + Add(key string, value ...string) bool + // Set sets the provided values to a header designated by key. If not + // accepted by the baggage implementation, it will return false. + Set(key string, value ...string) bool + // Delete removes the field data designated by key. If not accepted by the + // baggage implementation, it will return false. + Delete(key string) bool + // Iterate will iterate over the available fields and for each one it will + // trigger the callback function. + Iterate(f func(key string, values []string)) +} + +// SpanContext holds the context of a Span. +type SpanContext struct { + TraceID TraceID `json:"traceId"` + ID ID `json:"id"` + ParentID *ID `json:"parentId,omitempty"` + Debug bool `json:"debug,omitempty"` + Sampled *bool `json:"-"` + Err error `json:"-"` + Baggage BaggageFields `json:"-"` +} + +// SpanModel structure. 
+// +// If using this library to instrument your application you will not need to +// directly access or modify this representation. The SpanModel is exported for +// use cases involving 3rd party Go instrumentation libraries desiring to +// export data to a Zipkin server using the Zipkin V2 Span model. +type SpanModel struct { + SpanContext + Name string `json:"name,omitempty"` + Kind Kind `json:"kind,omitempty"` + Timestamp time.Time `json:"-"` + Duration time.Duration `json:"-"` + Shared bool `json:"shared,omitempty"` + LocalEndpoint *Endpoint `json:"localEndpoint,omitempty"` + RemoteEndpoint *Endpoint `json:"remoteEndpoint,omitempty"` + Annotations []Annotation `json:"annotations,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +// MarshalJSON exports our Model into the correct format for the Zipkin V2 API. +func (s SpanModel) MarshalJSON() ([]byte, error) { + type Alias SpanModel + + var timestamp int64 + if !s.Timestamp.IsZero() { + if s.Timestamp.Unix() < 1 { + // Zipkin does not allow Timestamps before Unix epoch + return nil, ErrValidTimestampRequired + } + timestamp = s.Timestamp.Round(time.Microsecond).UnixNano() / 1e3 + } + + if s.Duration < time.Microsecond { + if s.Duration < 0 { + // negative duration is not allowed and signals a timing logic error + return nil, ErrValidDurationRequired + } else if s.Duration > 0 { + // sub microsecond durations are reported as 1 microsecond + s.Duration = 1 * time.Microsecond + } + } else { + // Duration will be rounded to nearest microsecond representation. + // + // NOTE: Duration.Round() is not available in Go 1.8 which we still support. + // To handle microsecond resolution rounding we'll add 500 nanoseconds to + // the duration. When truncated to microseconds in the call to marshal, it + // will be naturally rounded. 
See TestSpanDurationRounding in span_test.go + s.Duration += 500 * time.Nanosecond + } + + s.Name = strings.ToLower(s.Name) + + if s.LocalEndpoint.Empty() { + s.LocalEndpoint = nil + } + + if s.RemoteEndpoint.Empty() { + s.RemoteEndpoint = nil + } + + return json.Marshal(&struct { + T int64 `json:"timestamp,omitempty"` + D int64 `json:"duration,omitempty"` + Alias + }{ + T: timestamp, + D: s.Duration.Nanoseconds() / 1e3, + Alias: (Alias)(s), + }) +} + +// UnmarshalJSON imports our Model from a Zipkin V2 API compatible span +// representation. +func (s *SpanModel) UnmarshalJSON(b []byte) error { + type Alias SpanModel + span := &struct { + T uint64 `json:"timestamp,omitempty"` + D uint64 `json:"duration,omitempty"` + *Alias + }{ + Alias: (*Alias)(s), + } + if err := json.Unmarshal(b, &span); err != nil { + return err + } + if s.ID < 1 { + return ErrValidIDRequired + } + if span.T > 0 { + s.Timestamp = time.Unix(0, int64(span.T)*1e3) + } + s.Duration = time.Duration(span.D*1e3) * time.Nanosecond + if s.LocalEndpoint.Empty() { + s.LocalEndpoint = nil + } + + if s.RemoteEndpoint.Empty() { + s.RemoteEndpoint = nil + } + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go new file mode 100644 index 00000000..acd72ea7 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go @@ -0,0 +1,44 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// ID type +type ID uint64 + +// String outputs the 64-bit ID as hex string. +func (i ID) String() string { + return fmt.Sprintf("%016x", uint64(i)) +} + +// MarshalJSON serializes an ID type (SpanID, ParentSpanID) to HEX. +func (i ID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", i.String())), nil +} + +// UnmarshalJSON deserializes an ID type (SpanID, ParentSpanID) from HEX. +func (i *ID) UnmarshalJSON(b []byte) (err error) { + var id uint64 + if len(b) < 3 { + return nil + } + id, err = strconv.ParseUint(string(b[1:len(b)-1]), 16, 64) + *i = ID(id) + return err +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go new file mode 100644 index 00000000..dca65535 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go @@ -0,0 +1,75 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// TraceID is a 128 bit number internally stored as 2x uint64 (high & low). +// In case of 64 bit traceIDs, the value can be found in Low. +type TraceID struct { + High uint64 + Low uint64 +} + +// Empty returns if TraceID has zero value. 
+func (t TraceID) Empty() bool { + return t.Low == 0 && t.High == 0 +} + +// String outputs the 128-bit traceID as hex string. +func (t TraceID) String() string { + if t.High == 0 { + return fmt.Sprintf("%016x", t.Low) + } + return fmt.Sprintf("%016x%016x", t.High, t.Low) +} + +// TraceIDFromHex returns the TraceID from a hex string. +func TraceIDFromHex(h string) (t TraceID, err error) { + if len(h) > 16 { + if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil { + return + } + t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64) + return + } + t.Low, err = strconv.ParseUint(h, 16, 64) + return +} + +// MarshalJSON custom JSON serializer to export the TraceID in the required +// zero padded hex representation. +func (t TraceID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", t.String())), nil +} + +// UnmarshalJSON custom JSON deserializer to retrieve the traceID from the hex +// encoded representation. +func (t *TraceID) UnmarshalJSON(traceID []byte) error { + if len(traceID) < 3 { + return ErrValidTraceIDRequired + } + // A valid JSON string is encoded wrapped in double quotes. We need to trim + // these before converting the hex payload. + tID, err := TraceIDFromHex(string(traceID[1 : len(traceID)-1])) + if err != nil { + return err + } + *t = tID + return nil +} diff --git a/vendor/github.com/vito/go-sse/LICENSE.md b/vendor/github.com/vito/go-sse/LICENSE.md new file mode 100644 index 00000000..5c304d1a --- /dev/null +++ b/vendor/github.com/vito/go-sse/LICENSE.md @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/vito/go-sse/sse/errors.go b/vendor/github.com/vito/go-sse/sse/errors.go new file mode 100644 index 00000000..5cf74bc0 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/errors.go @@ -0,0 +1,5 @@ +package sse + +import "errors" + +var ErrSourceClosed = errors.New("source closed") diff --git a/vendor/github.com/vito/go-sse/sse/event.go b/vendor/github.com/vito/go-sse/sse/event.go new file mode 100644 index 00000000..09cc3d25 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/event.go @@ -0,0 +1,129 @@ +package sse + +import ( + "bytes" + "io" + "strconv" + "time" +) + +// Define byte slice constants used in encoding/writing +var ( + idPrefix = []byte("id: ") + eventPrefix = []byte("event: ") + retryPrefix = []byte("retry: ") + dataPrefix = []byte("data: ") + emptyData = []byte("data") + newline = []byte("\n") +) + +// Event represents a Server-Sent Event +type Event struct { + ID string + Name string + Data []byte + Retry time.Duration +} + +func (event Event) Encode() string { + // Make an educated capacity estimate + capacity := 8 + len(event.ID) + 8 + len(event.Name) + 20 + dataLines := bytes.Count(event.Data, newline) + 1 + capacity += len(event.Data) + (dataLines * 7) + + var buf bytes.Buffer + buf.Grow(capacity) + + // Use the pre-defined constants + buf.Write(idPrefix) + buf.WriteString(event.ID) + buf.Write(newline) + + buf.Write(eventPrefix) + buf.WriteString(event.Name) + buf.Write(newline) + + if event.Retry != 0 { + buf.Write(retryPrefix) + buf.WriteString(strconv.FormatInt(int64(event.Retry/1000/1000), 10)) + buf.Write(newline) + } + + for _, line := range bytes.Split(event.Data, []byte("\n")) { + if len(line) == 0 { + buf.Write(emptyData) + } else { + buf.Write(dataPrefix) + buf.Write(line) + } + buf.Write(newline) + } + + buf.Write(newline) + + return buf.String() +} + +func (event Event) Write(destination io.Writer) error { + // Write id + if _, err := destination.Write(idPrefix); err != nil { + return err + } + if 
_, err := destination.Write([]byte(event.ID)); err != nil { + return err + } + if _, err := destination.Write(newline); err != nil { + return err + } + + // Write event + if _, err := destination.Write(eventPrefix); err != nil { + return err + } + if _, err := destination.Write([]byte(event.Name)); err != nil { + return err + } + if _, err := destination.Write(newline); err != nil { + return err + } + + // Write retry if present + if event.Retry != 0 { + if _, err := destination.Write(retryPrefix); err != nil { + return err + } + + retryValue := strconv.FormatInt(int64(event.Retry/1000/1000), 10) + if _, err := destination.Write([]byte(retryValue)); err != nil { + return err + } + + if _, err := destination.Write(newline); err != nil { + return err + } + } + + // Write data lines + for _, line := range bytes.Split(event.Data, []byte("\n")) { + if len(line) == 0 { + if _, err := destination.Write(emptyData); err != nil { + return err + } + } else { + if _, err := destination.Write(dataPrefix); err != nil { + return err + } + if _, err := destination.Write(line); err != nil { + return err + } + } + + if _, err := destination.Write(newline); err != nil { + return err + } + } + + // Final newline + _, err := destination.Write(newline) + return err +} diff --git a/vendor/github.com/vito/go-sse/sse/event_source.go b/vendor/github.com/vito/go-sse/sse/event_source.go new file mode 100644 index 00000000..faba5441 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/event_source.go @@ -0,0 +1,278 @@ +package sse + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" +) + +// EventSource behaves like the EventSource interface from the Server-Sent +// Events spec implemented in many browsers. See +// http://www.w3.org/TR/eventsource/#the-eventsource-interface for details. +// +// To use, optionally call Connect(), and then call Next(). If Next() is called +// prior to Connect(), it will connect for you. 
+// +// Alternatively, create a Config struct instance and call Connect() and then call +// Next(). +// +// Next() is often called asynchronously in a loop so that the event source can +// be closed. Next() will block on reading from the server. +// +// If Close() is called while reading an event, Next() will return early, and +// subsequent calls to Next() will return early. To read new events, Connect() +// must be called. +// +// If an EOF is received, Next() returns io.EOF, and subsequent calls to Next() +// will return early. To read new events, Connect() must be called. +type EventSource struct { + client Doer + createRequest func() *http.Request + + currentReadCloser *ReadCloser + lastEventID string + lock sync.Mutex + + closeOnce *sync.Once + closed chan struct{} + + retryInterval time.Duration + maxRetries uint16 +} + +type Doer interface { + Do(*http.Request) (*http.Response, error) +} + +type BadResponseError struct { + Response *http.Response +} + +func (err BadResponseError) Error() string { + return fmt.Sprintf("bad response from event source: %s", err.Response.Status) +} + +type RetryParams struct { + RetryInterval time.Duration + MaxRetries uint16 +} + +type Config struct { + Client Doer + RetryParams RetryParams + RequestCreator func() *http.Request +} + +func (c *Config) Connect() (*EventSource, error) { + client := c.Client + if client == nil { + client = http.DefaultClient + } + source := createEventSource(client, c.RetryParams, c.RequestCreator) + + readCloser, err := source.establishConnection() + if err != nil { + return nil, err + } + + source.currentReadCloser = readCloser + + return source, nil +} + +func NewEventSource(client Doer, defaultRetryInterval time.Duration, requestCreator func() *http.Request) *EventSource { + retryParams := RetryParams{ + RetryInterval: defaultRetryInterval, + } + return createEventSource(client, retryParams, requestCreator) +} + +func createEventSource(client Doer, retryParams RetryParams, requestCreator 
func() *http.Request) *EventSource { + return &EventSource{ + client: client, + createRequest: requestCreator, + + closeOnce: new(sync.Once), + closed: make(chan struct{}), + retryInterval: retryParams.RetryInterval, + maxRetries: retryParams.MaxRetries, + } +} + +func Connect(client Doer, defaultRetryInterval time.Duration, requestCreator func() *http.Request) (*EventSource, error) { + source := NewEventSource(client, defaultRetryInterval, requestCreator) + + readCloser, err := source.establishConnection() + if err != nil { + return nil, err + } + + source.currentReadCloser = readCloser + + return source, nil +} + +func (source *EventSource) Next() (Event, error) { + select { + case <-source.closed: + return Event{}, ErrSourceClosed + default: + } + + for { + readCloser, err := source.ensureReadCloser() + if err != nil { + return Event{}, err + } + + event, err := readCloser.Next() + if err == nil { + source.lastEventID = event.ID + + if event.Retry != 0 { + source.retryInterval = event.Retry + } + + return event, nil + } + + if err == io.EOF { + return Event{}, err + } + + readCloser.Close() + + if err := source.waitForRetry(); err != nil { + return Event{}, err + } + } + + panic("unreachable") +} + +func (source *EventSource) Close() error { + source.lock.Lock() + defer source.lock.Unlock() + + source.closeOnce.Do(func() { + close(source.closed) + }) + + if source.currentReadCloser != nil { + err := source.currentReadCloser.Close() + if err != nil { + return err + } + + source.currentReadCloser = nil + } + + return nil +} + +func (source *EventSource) ensureReadCloser() (*ReadCloser, error) { + source.lock.Lock() + + if source.currentReadCloser == nil { + source.lock.Unlock() + + newReadCloser, err := source.establishConnection() + if err != nil { + return nil, err + } + + source.lock.Lock() + + select { + case <-source.closed: + source.lock.Unlock() + newReadCloser.Close() + return nil, ErrSourceClosed + + default: + source.currentReadCloser = newReadCloser + } 
+ } + + readCloser := source.currentReadCloser + + source.lock.Unlock() + + return readCloser, nil +} + +func (source *EventSource) establishConnection() (*ReadCloser, error) { + var connectionRetries uint16 + for { + req := source.createRequest() + + if req.Header == nil { + req.Header = http.Header{} + } + + if source.lastEventID != "" { + req.Header.Set("Last-Event-ID", source.lastEventID) + } + + res, err := source.client.Do(req) + if err != nil { + connectionRetries++ + if !source.shouldRetry(connectionRetries) { + return nil, err + } + err := source.waitForRetry() + if err != nil { + return nil, err + } + + continue + } + + switch res.StatusCode { + case http.StatusOK: + return NewReadCloser(res.Body), nil + + // reestablish the connection + case http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + res.Body.Close() + + err := source.waitForRetry() + if err != nil { + return nil, err + } + + continue + + // fail the connection + default: + res.Body.Close() + + return nil, BadResponseError{ + Response: res, + } + } + } +} + +func (source *EventSource) waitForRetry() error { + source.lock.Lock() + source.currentReadCloser = nil + source.lock.Unlock() + + select { + case <-time.After(source.retryInterval): + return nil + case <-source.closed: + return ErrSourceClosed + } +} + +func (source *EventSource) shouldRetry(retries uint16) bool { + return source.maxRetries == 0 || + (source.maxRetries > 0 && retries <= source.maxRetries) +} diff --git a/vendor/github.com/vito/go-sse/sse/read_closer.go b/vendor/github.com/vito/go-sse/sse/read_closer.go new file mode 100644 index 00000000..066d7c92 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/read_closer.go @@ -0,0 +1,122 @@ +package sse + +import ( + "bufio" + "bytes" + "errors" + "io" + "strconv" + "time" +) + +type ReadCloser struct { + lastID string + + buf *bufio.Reader + closeSource func() error + closed bool +} + +func NewReadCloser(source 
io.ReadCloser) *ReadCloser { + return &ReadCloser{ + closeSource: func() error { return source.Close() }, + buf: bufio.NewReader(source), + } +} + +var alreadyClosedError = errors.New("ReadCloser already closed") + +func (rc *ReadCloser) Close() error { + if rc.closed { + return alreadyClosedError + } + + rc.closed = true + + return rc.closeSource() +} + +func (rc *ReadCloser) Next() (Event, error) { + var event Event + + // event ID defaults to last ID per the spec + event.ID = rc.lastID + + // if an empty id is explicitly given, it sets the value and resets the last + // id; track its presence with a bool to distinguish between zero-value + idPresent := false + + prefix := []byte{} + for { + line, isPrefix, err := rc.buf.ReadLine() + if err != nil { + return Event{}, err + } + + line = append(prefix, line...) + + if isPrefix { + prefix = line + continue + } else { + prefix = []byte{} + } + + // empty line; dispatch event + if len(line) == 0 { + if len(event.Data) == 0 { + // event had no data; skip it per the spec + continue + } + + if idPresent { + // record last ID + rc.lastID = event.ID + } + + // trim terminating linebreak + event.Data = event.Data[0 : len(event.Data)-1] + + // dispatch event + return event, nil + } + + if line[0] == ':' { + // comment; skip + continue + } + + var field, value string + + segments := bytes.SplitN(line, []byte(":"), 2) + if len(segments) == 1 { + // line with no colon is just the field, with empty value + field = string(segments[0]) + } else { + field = string(segments[0]) + value = string(segments[1]) + } + + if len(value) > 0 { + // trim only a single leading space + if value[0] == ' ' { + value = value[1:] + } + } + + switch field { + case "id": + idPresent = true + event.ID = value + case "event": + event.Name = value + case "data": + event.Data = append(event.Data, []byte(value+"\n")...) 
+ case "retry": + retryInMS, err := strconv.Atoi(value) + if err == nil { + event.Retry = time.Duration(retryInMS) * time.Millisecond + } + } + } +} diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml new file mode 100644 index 00000000..9a2ed4a9 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.codecov.yml @@ -0,0 +1,14 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 90% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore new file mode 100644 index 00000000..dd7bcf51 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log +coverage.txt + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md new file mode 100644 index 00000000..f421056a --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CHANGELOG.md @@ -0,0 +1,52 @@ +# Changelog + +## v1.6.0 (2024-07-24) + +- Add RoundQuotaFunc option that allows configuration of rounding + behavior for floating point CPU quota. 
+ +## v1.5.3 (2023-07-19) + +- Fix mountinfo parsing when super options have fields with spaces. +- Fix division by zero while parsing cgroups. + +## v1.5.2 (2023-03-16) + +- Support child control cgroups +- Fix file descriptor leak +- Update dependencies + +## v1.5.1 (2022-04-06) + +- Fix cgroups v2 mountpoint detection. + +## v1.5.0 (2022-04-05) + +- Add support for cgroups v2. + +Thanks to @emadolsky for their contribution to this release. + +## v1.4.0 (2021-02-01) + +- Support colons in cgroup names. +- Remove linters from runtime dependencies. + +## v1.3.0 (2020-01-23) + +- Migrate to Go modules. + +## v1.2.0 (2018-02-22) + +- Fixed quota clamping to always round down rather than up; Rather than + guaranteeing constant throttling at saturation, instead assume that the + fractional CPU was added as a hedge for factors outside of Go's scheduler. + +## v1.1.0 (2017-11-10) + +- Log the new value of `GOMAXPROCS` rather than the current value. +- Make logs more explicit about whether `GOMAXPROCS` was modified or not. +- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. + +## v1.0.0 (2017-08-09) + +- Initial release. diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..e327d9aa --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md new file mode 100644 index 00000000..2b6a6040 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help improving this package! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. 
+ +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/automaxprocs.git +cd automaxprocs +git remote add upstream https://github.com/uber-go/automaxprocs.git +git fetch upstream +``` + +Install the test dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/automaxprocs +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. 
+ +[fork]: https://github.com/uber-go/automaxprocs/fork +[open-issue]: https://github.com/uber-go/automaxprocs/issues/new +[cla]: https://cla-assistant.io/uber-go/automaxprocs +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE new file mode 100644 index 00000000..20dcf51d --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile new file mode 100644 index 00000000..1642b714 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/Makefile @@ -0,0 +1,46 @@ +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . 
'(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck + +.PHONY: build +build: + go build ./... + +.PHONY: install +install: + go mod download + +.PHONY: test +test: + go test -race ./... + +.PHONY: cover +cover: + go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +$(GOLINT): tools/go.mod + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): tools/go.mod + cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking gofmt" + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking go vet" + @go vet ./... 2>&1 | tee -a lint.log + @echo "Checking golint" + @$(GOLINT) ./... | tee -a lint.log + @echo "Checking staticcheck" + @$(STATICCHECK) ./... 2>&1 | tee -a lint.log + @echo "Checking for license headers..." + @./.build/check_license.sh | tee -a lint.log + @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md new file mode 100644 index 00000000..bfed32ad --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/README.md @@ -0,0 +1,71 @@ +# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Automatically set `GOMAXPROCS` to match Linux container CPU quota. + +## Installation + +`go get -u go.uber.org/automaxprocs` + +## Quick Start + +```go +import _ "go.uber.org/automaxprocs" + +func main() { + // Your application logic here. +} +``` + +# Performance +Data measured from Uber's internal load balancer. 
We ran the load balancer with 200% CPU quota (i.e., 2 cores): + +| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | +| ------------------ | --------- | -------- | ---------- | +| 1 | 28,893.18 | 1.46 | 19.70 | +| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | +| 3 | 44,212.93 | 0.66 | 30.07 | +| 4 | 41,071.15 | 0.57 | 42.94 | +| 8 | 33,111.69 | 0.43 | 64.32 | +| Default (24) | 22,191.40 | 0.45 | 76.19 | + +When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled also decreases. + +When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: + +``` +$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat +nr_periods 42227334 +nr_throttled 131923 +throttled_time 88613212216618 +``` + +Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +automaxprocs to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep +an eye on issues and pull requests, but you can also report any negative +conduct to oss-conduct@uber.com. That email list is a private, safe space; +even the automaxprocs maintainers don't have access, so don't hesitate to hold +us to a high standard. + +
+ +Released under the [MIT License](LICENSE). + +[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg +[doc]: https://godoc.org/go.uber.org/automaxprocs +[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/automaxprocs + + diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go new file mode 100644 index 00000000..69946a3e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/automaxprocs.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package automaxprocs automatically sets GOMAXPROCS to match the Linux +// container CPU quota, if any. 
+package automaxprocs // import "go.uber.org/automaxprocs" + +import ( + "log" + + "go.uber.org/automaxprocs/maxprocs" +) + +func init() { + maxprocs.Set(maxprocs.Logger(log.Printf)) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go new file mode 100644 index 00000000..fe4ecf56 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" +) + +// CGroup represents the data structure for a Linux control group. +type CGroup struct { + path string +} + +// NewCGroup returns a new *CGroup from a given path. 
+func NewCGroup(path string) *CGroup { + return &CGroup{path: path} +} + +// Path returns the path of the CGroup*. +func (cg *CGroup) Path() string { + return cg.path +} + +// ParamPath returns the path of the given cgroup param under itself. +func (cg *CGroup) ParamPath(param string) string { + return filepath.Join(cg.path, param) +} + +// readFirstLine reads the first line from a cgroup param file. +func (cg *CGroup) readFirstLine(param string) (string, error) { + paramFile, err := os.Open(cg.ParamPath(param)) + if err != nil { + return "", err + } + defer paramFile.Close() + + scanner := bufio.NewScanner(paramFile) + if scanner.Scan() { + return scanner.Text(), nil + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", io.ErrUnexpectedEOF +} + +// readInt parses the first line from a cgroup param file as int. +func (cg *CGroup) readInt(param string) (int, error) { + text, err := cg.readFirstLine(param) + if err != nil { + return 0, err + } + return strconv.Atoi(text) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go new file mode 100644 index 00000000..e89f5436 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go @@ -0,0 +1,118 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +const ( + // _cgroupFSType is the Linux CGroup file system type used in + // `/proc/$PID/mountinfo`. + _cgroupFSType = "cgroup" + // _cgroupSubsysCPU is the CPU CGroup subsystem. + _cgroupSubsysCPU = "cpu" + // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem. + _cgroupSubsysCPUAcct = "cpuacct" + // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem. + _cgroupSubsysCPUSet = "cpuset" + // _cgroupSubsysMemory is the Memory CGroup subsystem. + _cgroupSubsysMemory = "memory" + + // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota + // parameter. + _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us" + // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period + // parameter. + _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us" +) + +const ( + _procPathCGroup = "/proc/self/cgroup" + _procPathMountInfo = "/proc/self/mountinfo" +) + +// CGroups is a map that associates each CGroup with its subsystem name. +type CGroups map[string]*CGroup + +// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files +// under for some process under `/proc` file system (see also proc(5) for more +// information). 
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) { + cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + cgroups := make(CGroups) + newMountPoint := func(mp *MountPoint) error { + if mp.FSType != _cgroupFSType { + return nil + } + + for _, opt := range mp.SuperOptions { + subsys, exists := cgroupSubsystems[opt] + if !exists { + continue + } + + cgroupPath, err := mp.Translate(subsys.Name) + if err != nil { + return err + } + cgroups[opt] = NewCGroup(cgroupPath) + } + + return nil + } + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return nil, err + } + return cgroups, nil +} + +// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current +// process. +func NewCGroupsForCurrentProcess() (CGroups, error) { + return NewCGroups(_procPathMountInfo, _procPathCGroup) +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup controller. +// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of +// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`. +func (cg CGroups) CPUQuota() (float64, bool, error) { + cpuCGroup, exists := cg[_cgroupSubsysCPU] + if !exists { + return -1, false, nil + } + + cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam) + if defined := cfsQuotaUs > 0; err != nil || !defined { + return -1, defined, err + } + + cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam) + if defined := cfsPeriodUs > 0; err != nil || !defined { + return -1, defined, err + } + + return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go new file mode 100644 index 00000000..78556062 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go @@ -0,0 +1,176 @@ +// Copyright (c) 2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" +) + +const ( + // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period + // parameter. + _cgroupv2CPUMax = "cpu.max" + // _cgroupFSType is the Linux CGroup-V2 file system type used in + // `/proc/$PID/mountinfo`. + _cgroupv2FSType = "cgroup2" + + _cgroupv2MountPoint = "/sys/fs/cgroup" + + _cgroupV2CPUMaxDefaultPeriod = 100000 + _cgroupV2CPUMaxQuotaMax = "max" +) + +const ( + _cgroupv2CPUMaxQuotaIndex = iota + _cgroupv2CPUMaxPeriodIndex +) + +// ErrNotV2 indicates that the system is not using cgroups2. +var ErrNotV2 = errors.New("not using cgroups2") + +// CGroups2 provides access to cgroups data for systems using cgroups2. 
+type CGroups2 struct { + mountPoint string + groupPath string + cpuMaxFile string +} + +// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process. +// +// This returns ErrNotV2 if the system is not using cgroups2. +func NewCGroups2ForCurrentProcess() (*CGroups2, error) { + return newCGroups2From(_procPathMountInfo, _procPathCGroup) +} + +func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) { + isV2, err := isCGroupV2(mountInfoPath) + if err != nil { + return nil, err + } + + if !isV2 { + return nil, ErrNotV2 + } + + subsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + // Find v2 subsystem by looking for the `0` id + var v2subsys *CGroupSubsys + for _, subsys := range subsystems { + if subsys.ID == 0 { + v2subsys = subsys + break + } + } + + if v2subsys == nil { + return nil, ErrNotV2 + } + + return &CGroups2{ + mountPoint: _cgroupv2MountPoint, + groupPath: v2subsys.Name, + cpuMaxFile: _cgroupv2CPUMax, + }, nil +} + +func isCGroupV2(procPathMountInfo string) (bool, error) { + var ( + isV2 bool + newMountPoint = func(mp *MountPoint) error { + isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint) + return nil + } + ) + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return false, err + } + + return isV2, nil +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller. +// It is a result of reading cpu quota and period from cpu.max file. +// It will return `cpu.max / cpu.period`. 
If cpu.max is set to max, it returns +// (-1, false, nil) +func (cg *CGroups2) CPUQuota() (float64, bool, error) { + cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile)) + if err != nil { + if os.IsNotExist(err) { + return -1, false, nil + } + return -1, false, err + } + defer cpuMaxParams.Close() + + scanner := bufio.NewScanner(cpuMaxParams) + if scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 || len(fields) > 2 { + return -1, false, fmt.Errorf("invalid format") + } + + if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax { + return -1, false, nil + } + + max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex]) + if err != nil { + return -1, false, err + } + + var period int + if len(fields) == 1 { + period = _cgroupV2CPUMaxDefaultPeriod + } else { + period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex]) + if err != nil { + return -1, false, err + } + + if period == 0 { + return -1, false, errors.New("zero value for period is not allowed") + } + } + + return float64(max) / float64(period), true, nil + } + + if err := scanner.Err(); err != nil { + return -1, false, err + } + + return 0, false, io.ErrUnexpectedEOF +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go new file mode 100644 index 00000000..113555f6 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package cgroups provides utilities to access Linux control group (CGroups) +// parameters (CPU quota, for example) for a given process. +package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go new file mode 100644 index 00000000..94ac75a4 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go @@ -0,0 +1,52 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build linux +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go new file mode 100644 index 00000000..f3877f78 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go @@ -0,0 +1,171 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. +func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + // End of optional fields. 
+ fsTypeStart := _miFieldIDOptionalFields + i + 1 + + // Now we know where the optional fields end, split the line again with a + // limit to avoid issues with spaces in super options as present on WSL. + fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf) + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." || strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. 
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(procPathMountInfo) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go new file mode 100644 index 00000000..cddc3eae --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go @@ -0,0 +1,103 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. +type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.SplitN(line, _cgroupSep, _csFieldCount) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. 
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(procPathCGroup) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go new file mode 100644 index 00000000..f9057fd2 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -0,0 +1,75 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package runtime + +import ( + "errors" + + cg "go.uber.org/automaxprocs/internal/cgroups" +) + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. The quota is converted from float to int using round. +// If round == nil, DefaultRoundFunc is used. +func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) { + if round == nil { + round = DefaultRoundFunc + } + cgroups, err := _newQueryer() + if err != nil { + return -1, CPUQuotaUndefined, err + } + + quota, defined, err := cgroups.CPUQuota() + if !defined || err != nil { + return -1, CPUQuotaUndefined, err + } + + maxProcs := round(quota) + if minValue > 0 && maxProcs < minValue { + return minValue, CPUQuotaMinUsed, nil + } + return maxProcs, CPUQuotaUsed, nil +} + +type queryer interface { + CPUQuota() (float64, bool, error) +} + +var ( + _newCgroups2 = cg.NewCGroups2ForCurrentProcess + _newCgroups = cg.NewCGroupsForCurrentProcess + _newQueryer = newQueryer +) + +func newQueryer() (queryer, error) { + cgroups, err := _newCgroups2() + if err == nil { + return cgroups, nil + } + if errors.Is(err, cg.ErrNotV2) { + return _newCgroups() + } + return nil, err +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go new file mode 100644 index 00000000..e7470150 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -0,0 +1,31 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !linux +// +build !linux + +package runtime + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the +// current OS. +func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) { + return -1, CPUQuotaUndefined, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go new file mode 100644 index 00000000..f8a2834a --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package runtime + +import "math" + +// CPUQuotaStatus presents the status of how CPU quota is used +type CPUQuotaStatus int + +const ( + // CPUQuotaUndefined is returned when CPU quota is undefined + CPUQuotaUndefined CPUQuotaStatus = iota + // CPUQuotaUsed is returned when a valid CPU quota can be used + CPUQuotaUsed + // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value + CPUQuotaMinUsed +) + +// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor). 
+func DefaultRoundFunc(v float64) int { + return int(math.Floor(v)) +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go new file mode 100644 index 00000000..e561fe60 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -0,0 +1,139 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. Unlike the top-level automaxprocs +// package, it lets the caller configure logging and handle errors. 
+package maxprocs // import "go.uber.org/automaxprocs/maxprocs" + +import ( + "os" + "runtime" + + iruntime "go.uber.org/automaxprocs/internal/runtime" +) + +const _maxProcsKey = "GOMAXPROCS" + +func currentMaxProcs() int { + return runtime.GOMAXPROCS(0) +} + +type config struct { + printf func(string, ...interface{}) + procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int +} + +func (c *config) log(fmt string, args ...interface{}) { + if c.printf != nil { + c.printf(fmt, args...) + } +} + +// An Option alters the behavior of Set. +type Option interface { + apply(*config) +} + +// Logger uses the supplied printf implementation for log output. By default, +// Set doesn't log anything. +func Logger(printf func(string, ...interface{})) Option { + return optionFunc(func(cfg *config) { + cfg.printf = printf + }) +} + +// Min sets the minimum GOMAXPROCS value that will be used. +// Any value below 1 is ignored. +func Min(n int) Option { + return optionFunc(func(cfg *config) { + if n >= 1 { + cfg.minGOMAXPROCS = n + } + }) +} + +// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. +func RoundQuotaFunc(rf func(v float64) int) Option { + return optionFunc(func(cfg *config) { + cfg.roundQuotaFunc = rf + }) +} + +type optionFunc func(*config) + +func (of optionFunc) apply(cfg *config) { of(cfg) } + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered and an undo function. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. 
+func Set(opts ...Option) (func(), error) { + cfg := &config{ + procs: iruntime.CPUQuotaToGOMAXPROCS, + roundQuotaFunc: iruntime.DefaultRoundFunc, + minGOMAXPROCS: 1, + } + for _, o := range opts { + o.apply(cfg) + } + + undoNoop := func() { + cfg.log("maxprocs: No GOMAXPROCS change to reset") + } + + // Honor the GOMAXPROCS environment variable if present. Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. The minimum guaranteed value + // can be overridden using `maxprocs.Min()`. + if max, exists := os.LookupEnv(_maxProcsKey); exists { + cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) + return undoNoop, nil + } + + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) + if err != nil { + return undoNoop, err + } + + if status == iruntime.CPUQuotaUndefined { + cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) + return undoNoop, nil + } + + prev := currentMaxProcs() + undo := func() { + cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) + runtime.GOMAXPROCS(prev) + } + + switch status { + case iruntime.CPUQuotaMinUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) + case iruntime.CPUQuotaUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) + } + + runtime.GOMAXPROCS(maxProcs) + return undo, nil +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go new file mode 100644 index 00000000..cc7fc5ae --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -0,0 +1,24 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package maxprocs + +// Version is the current package version. +const Version = "1.6.0" diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..2a7cf70d --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 00000000..47a9a541 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. 
+func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. 
+ profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. +// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. 
+ for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. 
+ } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 00000000..1da4a361 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,286 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. 
+// This representation is sometimes called a "balanced parenthesis tree." +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" + _ "unsafe" + + "golang.org/x/tools/internal/astutil/edge" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +//go:linkname events +func events(in *Inspector) []event { return in.events } + +//go:linkname packEdgeKindAndIndex +func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { + return int32(uint32(index+1)<<7 | uint32(ek)) +} + +// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within +// an []ast.Node slice) from the parent field of a pop event. +// +//go:linkname unpackEdgeKindAndIndex +func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { + // The "parent" field of a pop node holds the + // edge Kind in the lower 7 bits and the index+1 + // in the upper 25. + return edge.Kind(x & 0x7f), int(x>>7) - 1 +} + +// New returns an Inspector for the specified syntax trees. 
+func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int32 // index of corresponding push or pop event + parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only) +} + +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The complete traversal sequence is determined by ast.Inspect. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + f(ev.node) + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. 
It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The complete traversal sequence is determined by ast.Inspect. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 { + if !f(ev.node, true) { + i = pop + 1 // jump to corresponding pop + 1 + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n. +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = pop + 1 + stack = stack[:len(stack)-1] + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees does not contain types: skip them. 
+ i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. + capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M) + + v := &visitor{ + events: make([]event, 0, capacity), + stack: []item{{index: -1}}, // include an extra event so file nodes have a parent + } + for _, file := range files { + walk(v, edge.Invalid, -1, file) + } + return v.events +} + +type visitor struct { + events []event + stack []item +} + +type item struct { + index int32 // index of current node's push event + parentIndex int32 // index of parent node's push event + typAccum uint64 // accumulated type bits of current node's descendents + edgeKindAndIndex int32 // edge.Kind and index, bit packed +} + +func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) { + var ( + index = int32(len(v.events)) + parentIndex = v.stack[len(v.stack)-1].index + ) + v.events = append(v.events, event{ + node: node, + parent: parentIndex, + typ: typeOf(node), + index: 0, // (pop index is set later by visitor.pop) + }) + v.stack = append(v.stack, item{ + index: index, + parentIndex: parentIndex, + edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex), + }) + + // 2B nodes ought to be enough for anyone! + if int32(len(v.events)) < 0 { + panic("event index exceeded int32") + } + + // 32M elements in an []ast.Node ought to be enough for anyone! 
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex { + panic("Node slice index exceeded uint25") + } +} + +func (v *visitor) pop(node ast.Node) { + top := len(v.stack) - 1 + current := v.stack[top] + + push := &v.events[current.index] + parent := &v.stack[top-1] + + push.index = int32(len(v.events)) // make push event refer to pop + parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent + + v.stack = v.stack[:top] + + v.events = append(v.events, event{ + node: node, + typ: current.typAccum, + index: current.index, + parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex] + }) +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 00000000..c576dc70 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. 
+ + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 00000000..97784484 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,230 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file defines func typeOf(ast.Node) uint64. 
+// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import ( + "go/ast" + "math" + + _ "unsafe" +) + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nIndexListExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.IndexListExpr: + return 1 << nIndexListExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + 
case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +//go:linkname maskOf +func maskOf(nodes []ast.Node) uint64 { + if len(nodes) == 0 { + return math.MaxUint64 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go new file mode 100644 index 00000000..5a42174a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -0,0 +1,341 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file is a fork of ast.Inspect to reduce unnecessary dynamic +// calls and to gather edge information. +// +// Consistency with the original is ensured by TestInspectAllNodes. 
+ +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/internal/astutil/edge" +) + +func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { + for i, node := range list { + walk(v, ek, i, node) + } +} + +func walk(v *visitor, ek edge.Kind, index int, node ast.Node) { + v.push(ek, index, node) + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + walkList(v, edge.CommentGroup_List, n.List) + + case *ast.Field: + if n.Doc != nil { + walk(v, edge.Field_Doc, -1, n.Doc) + } + walkList(v, edge.Field_Names, n.Names) + if n.Type != nil { + walk(v, edge.Field_Type, -1, n.Type) + } + if n.Tag != nil { + walk(v, edge.Field_Tag, -1, n.Tag) + } + if n.Comment != nil { + walk(v, edge.Field_Comment, -1, n.Comment) + } + + case *ast.FieldList: + walkList(v, edge.FieldList_List, n.List) + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + if n.Elt != nil { + walk(v, edge.Ellipsis_Elt, -1, n.Elt) + } + + case *ast.FuncLit: + walk(v, edge.FuncLit_Type, -1, n.Type) + walk(v, edge.FuncLit_Body, -1, n.Body) + + case *ast.CompositeLit: + if n.Type != nil { + walk(v, edge.CompositeLit_Type, -1, n.Type) + } + walkList(v, edge.CompositeLit_Elts, n.Elts) + + case *ast.ParenExpr: + walk(v, edge.ParenExpr_X, -1, n.X) + + case *ast.SelectorExpr: + walk(v, edge.SelectorExpr_X, -1, n.X) + walk(v, edge.SelectorExpr_Sel, -1, n.Sel) + + case *ast.IndexExpr: + walk(v, edge.IndexExpr_X, -1, n.X) + walk(v, edge.IndexExpr_Index, -1, n.Index) + + case *ast.IndexListExpr: + walk(v, edge.IndexListExpr_X, -1, n.X) + walkList(v, edge.IndexListExpr_Indices, n.Indices) + + case *ast.SliceExpr: + walk(v, edge.SliceExpr_X, -1, n.X) + if n.Low != nil { + walk(v, edge.SliceExpr_Low, -1, n.Low) + } + if n.High != nil { + walk(v, edge.SliceExpr_High, -1, n.High) + } + 
if n.Max != nil { + walk(v, edge.SliceExpr_Max, -1, n.Max) + } + + case *ast.TypeAssertExpr: + walk(v, edge.TypeAssertExpr_X, -1, n.X) + if n.Type != nil { + walk(v, edge.TypeAssertExpr_Type, -1, n.Type) + } + + case *ast.CallExpr: + walk(v, edge.CallExpr_Fun, -1, n.Fun) + walkList(v, edge.CallExpr_Args, n.Args) + + case *ast.StarExpr: + walk(v, edge.StarExpr_X, -1, n.X) + + case *ast.UnaryExpr: + walk(v, edge.UnaryExpr_X, -1, n.X) + + case *ast.BinaryExpr: + walk(v, edge.BinaryExpr_X, -1, n.X) + walk(v, edge.BinaryExpr_Y, -1, n.Y) + + case *ast.KeyValueExpr: + walk(v, edge.KeyValueExpr_Key, -1, n.Key) + walk(v, edge.KeyValueExpr_Value, -1, n.Value) + + // Types + case *ast.ArrayType: + if n.Len != nil { + walk(v, edge.ArrayType_Len, -1, n.Len) + } + walk(v, edge.ArrayType_Elt, -1, n.Elt) + + case *ast.StructType: + walk(v, edge.StructType_Fields, -1, n.Fields) + + case *ast.FuncType: + if n.TypeParams != nil { + walk(v, edge.FuncType_TypeParams, -1, n.TypeParams) + } + if n.Params != nil { + walk(v, edge.FuncType_Params, -1, n.Params) + } + if n.Results != nil { + walk(v, edge.FuncType_Results, -1, n.Results) + } + + case *ast.InterfaceType: + walk(v, edge.InterfaceType_Methods, -1, n.Methods) + + case *ast.MapType: + walk(v, edge.MapType_Key, -1, n.Key) + walk(v, edge.MapType_Value, -1, n.Value) + + case *ast.ChanType: + walk(v, edge.ChanType_Value, -1, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + walk(v, edge.DeclStmt_Decl, -1, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + walk(v, edge.LabeledStmt_Label, -1, n.Label) + walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt) + + case *ast.ExprStmt: + walk(v, edge.ExprStmt_X, -1, n.X) + + case *ast.SendStmt: + walk(v, edge.SendStmt_Chan, -1, n.Chan) + walk(v, edge.SendStmt_Value, -1, n.Value) + + case *ast.IncDecStmt: + walk(v, edge.IncDecStmt_X, -1, n.X) + + case *ast.AssignStmt: + walkList(v, edge.AssignStmt_Lhs, n.Lhs) + walkList(v, 
edge.AssignStmt_Rhs, n.Rhs) + + case *ast.GoStmt: + walk(v, edge.GoStmt_Call, -1, n.Call) + + case *ast.DeferStmt: + walk(v, edge.DeferStmt_Call, -1, n.Call) + + case *ast.ReturnStmt: + walkList(v, edge.ReturnStmt_Results, n.Results) + + case *ast.BranchStmt: + if n.Label != nil { + walk(v, edge.BranchStmt_Label, -1, n.Label) + } + + case *ast.BlockStmt: + walkList(v, edge.BlockStmt_List, n.List) + + case *ast.IfStmt: + if n.Init != nil { + walk(v, edge.IfStmt_Init, -1, n.Init) + } + walk(v, edge.IfStmt_Cond, -1, n.Cond) + walk(v, edge.IfStmt_Body, -1, n.Body) + if n.Else != nil { + walk(v, edge.IfStmt_Else, -1, n.Else) + } + + case *ast.CaseClause: + walkList(v, edge.CaseClause_List, n.List) + walkList(v, edge.CaseClause_Body, n.Body) + + case *ast.SwitchStmt: + if n.Init != nil { + walk(v, edge.SwitchStmt_Init, -1, n.Init) + } + if n.Tag != nil { + walk(v, edge.SwitchStmt_Tag, -1, n.Tag) + } + walk(v, edge.SwitchStmt_Body, -1, n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + walk(v, edge.TypeSwitchStmt_Init, -1, n.Init) + } + walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign) + walk(v, edge.TypeSwitchStmt_Body, -1, n.Body) + + case *ast.CommClause: + if n.Comm != nil { + walk(v, edge.CommClause_Comm, -1, n.Comm) + } + walkList(v, edge.CommClause_Body, n.Body) + + case *ast.SelectStmt: + walk(v, edge.SelectStmt_Body, -1, n.Body) + + case *ast.ForStmt: + if n.Init != nil { + walk(v, edge.ForStmt_Init, -1, n.Init) + } + if n.Cond != nil { + walk(v, edge.ForStmt_Cond, -1, n.Cond) + } + if n.Post != nil { + walk(v, edge.ForStmt_Post, -1, n.Post) + } + walk(v, edge.ForStmt_Body, -1, n.Body) + + case *ast.RangeStmt: + if n.Key != nil { + walk(v, edge.RangeStmt_Key, -1, n.Key) + } + if n.Value != nil { + walk(v, edge.RangeStmt_Value, -1, n.Value) + } + walk(v, edge.RangeStmt_X, -1, n.X) + walk(v, edge.RangeStmt_Body, -1, n.Body) + + // Declarations + case *ast.ImportSpec: + if n.Doc != nil { + walk(v, edge.ImportSpec_Doc, -1, n.Doc) + } + if n.Name != nil { + 
walk(v, edge.ImportSpec_Name, -1, n.Name) + } + walk(v, edge.ImportSpec_Path, -1, n.Path) + if n.Comment != nil { + walk(v, edge.ImportSpec_Comment, -1, n.Comment) + } + + case *ast.ValueSpec: + if n.Doc != nil { + walk(v, edge.ValueSpec_Doc, -1, n.Doc) + } + walkList(v, edge.ValueSpec_Names, n.Names) + if n.Type != nil { + walk(v, edge.ValueSpec_Type, -1, n.Type) + } + walkList(v, edge.ValueSpec_Values, n.Values) + if n.Comment != nil { + walk(v, edge.ValueSpec_Comment, -1, n.Comment) + } + + case *ast.TypeSpec: + if n.Doc != nil { + walk(v, edge.TypeSpec_Doc, -1, n.Doc) + } + walk(v, edge.TypeSpec_Name, -1, n.Name) + if n.TypeParams != nil { + walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams) + } + walk(v, edge.TypeSpec_Type, -1, n.Type) + if n.Comment != nil { + walk(v, edge.TypeSpec_Comment, -1, n.Comment) + } + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + if n.Doc != nil { + walk(v, edge.GenDecl_Doc, -1, n.Doc) + } + walkList(v, edge.GenDecl_Specs, n.Specs) + + case *ast.FuncDecl: + if n.Doc != nil { + walk(v, edge.FuncDecl_Doc, -1, n.Doc) + } + if n.Recv != nil { + walk(v, edge.FuncDecl_Recv, -1, n.Recv) + } + walk(v, edge.FuncDecl_Name, -1, n.Name) + walk(v, edge.FuncDecl_Type, -1, n.Type) + if n.Body != nil { + walk(v, edge.FuncDecl_Body, -1, n.Body) + } + + case *ast.File: + if n.Doc != nil { + walk(v, edge.File_Doc, -1, n.Doc) + } + walk(v, edge.File_Name, -1, n.Name) + walkList(v, edge.File_Decls, n.Decls) + // don't walk n.Comments - they have been + // visited already through the individual + // nodes + + default: + // (includes *ast.Package) + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.pop(node) +} diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go new file mode 100644 index 00000000..4f6ccfd6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edge defines identifiers for each field of an ast.Node +// struct type that refers to another Node. +package edge + +import ( + "fmt" + "go/ast" + "reflect" +) + +// A Kind describes a field of an ast.Node struct. +type Kind uint8 + +// String returns a description of the edge kind. +func (k Kind) String() string { + if k == Invalid { + return "" + } + info := fieldInfos[k] + return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name) +} + +// NodeType returns the pointer-to-struct type of the ast.Node implementation. +func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType } + +// FieldName returns the name of the field. +func (k Kind) FieldName() string { return fieldInfos[k].name } + +// FieldType returns the declared type of the field. +func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType } + +// Get returns the direct child of n identified by (k, idx). +// n's type must match k.NodeType(). +// idx must be a valid slice index, or -1 for a non-slice. +func (k Kind) Get(n ast.Node, idx int) ast.Node { + if k.NodeType() != reflect.TypeOf(n) { + panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n)) + } + v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index) + if idx != -1 { + v = v.Index(idx) // asserts valid index + } else { + // (The type assertion below asserts that v is not a slice.) + } + return v.Interface().(ast.Node) // may be nil +} + +const ( + Invalid Kind = iota // for nodes at the root of the traversal + + // Kinds are sorted alphabetically. + // Numbering is not stable. 
+ // Each is named Type_Field, where Type is the + // ast.Node struct type and Field is the name of the field + + ArrayType_Elt + ArrayType_Len + AssignStmt_Lhs + AssignStmt_Rhs + BinaryExpr_X + BinaryExpr_Y + BlockStmt_List + BranchStmt_Label + CallExpr_Args + CallExpr_Fun + CaseClause_Body + CaseClause_List + ChanType_Value + CommClause_Body + CommClause_Comm + CommentGroup_List + CompositeLit_Elts + CompositeLit_Type + DeclStmt_Decl + DeferStmt_Call + Ellipsis_Elt + ExprStmt_X + FieldList_List + Field_Comment + Field_Doc + Field_Names + Field_Tag + Field_Type + File_Decls + File_Doc + File_Name + ForStmt_Body + ForStmt_Cond + ForStmt_Init + ForStmt_Post + FuncDecl_Body + FuncDecl_Doc + FuncDecl_Name + FuncDecl_Recv + FuncDecl_Type + FuncLit_Body + FuncLit_Type + FuncType_Params + FuncType_Results + FuncType_TypeParams + GenDecl_Doc + GenDecl_Specs + GoStmt_Call + IfStmt_Body + IfStmt_Cond + IfStmt_Else + IfStmt_Init + ImportSpec_Comment + ImportSpec_Doc + ImportSpec_Name + ImportSpec_Path + IncDecStmt_X + IndexExpr_Index + IndexExpr_X + IndexListExpr_Indices + IndexListExpr_X + InterfaceType_Methods + KeyValueExpr_Key + KeyValueExpr_Value + LabeledStmt_Label + LabeledStmt_Stmt + MapType_Key + MapType_Value + ParenExpr_X + RangeStmt_Body + RangeStmt_Key + RangeStmt_Value + RangeStmt_X + ReturnStmt_Results + SelectStmt_Body + SelectorExpr_Sel + SelectorExpr_X + SendStmt_Chan + SendStmt_Value + SliceExpr_High + SliceExpr_Low + SliceExpr_Max + SliceExpr_X + StarExpr_X + StructType_Fields + SwitchStmt_Body + SwitchStmt_Init + SwitchStmt_Tag + TypeAssertExpr_Type + TypeAssertExpr_X + TypeSpec_Comment + TypeSpec_Doc + TypeSpec_Name + TypeSpec_Type + TypeSpec_TypeParams + TypeSwitchStmt_Assign + TypeSwitchStmt_Body + TypeSwitchStmt_Init + UnaryExpr_X + ValueSpec_Comment + ValueSpec_Doc + ValueSpec_Names + ValueSpec_Type + ValueSpec_Values + + maxKind +) + +// Assert that the encoding fits in 7 bits, +// as the inspector relies on this. +// (We are currently at 104.) 
+var _ = [1 << 7]struct{}{}[maxKind] + +type fieldInfo struct { + nodeType reflect.Type // pointer-to-struct type of ast.Node implementation + name string + index int + fieldType reflect.Type +} + +func info[N ast.Node](fieldName string) fieldInfo { + nodePtrType := reflect.TypeFor[N]() + f, ok := nodePtrType.Elem().FieldByName(fieldName) + if !ok { + panic(fieldName) + } + return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type} +} + +var fieldInfos = [...]fieldInfo{ + Invalid: {}, + ArrayType_Elt: info[*ast.ArrayType]("Elt"), + ArrayType_Len: info[*ast.ArrayType]("Len"), + AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"), + AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"), + BinaryExpr_X: info[*ast.BinaryExpr]("X"), + BinaryExpr_Y: info[*ast.BinaryExpr]("Y"), + BlockStmt_List: info[*ast.BlockStmt]("List"), + BranchStmt_Label: info[*ast.BranchStmt]("Label"), + CallExpr_Args: info[*ast.CallExpr]("Args"), + CallExpr_Fun: info[*ast.CallExpr]("Fun"), + CaseClause_Body: info[*ast.CaseClause]("Body"), + CaseClause_List: info[*ast.CaseClause]("List"), + ChanType_Value: info[*ast.ChanType]("Value"), + CommClause_Body: info[*ast.CommClause]("Body"), + CommClause_Comm: info[*ast.CommClause]("Comm"), + CommentGroup_List: info[*ast.CommentGroup]("List"), + CompositeLit_Elts: info[*ast.CompositeLit]("Elts"), + CompositeLit_Type: info[*ast.CompositeLit]("Type"), + DeclStmt_Decl: info[*ast.DeclStmt]("Decl"), + DeferStmt_Call: info[*ast.DeferStmt]("Call"), + Ellipsis_Elt: info[*ast.Ellipsis]("Elt"), + ExprStmt_X: info[*ast.ExprStmt]("X"), + FieldList_List: info[*ast.FieldList]("List"), + Field_Comment: info[*ast.Field]("Comment"), + Field_Doc: info[*ast.Field]("Doc"), + Field_Names: info[*ast.Field]("Names"), + Field_Tag: info[*ast.Field]("Tag"), + Field_Type: info[*ast.Field]("Type"), + File_Decls: info[*ast.File]("Decls"), + File_Doc: info[*ast.File]("Doc"), + File_Name: info[*ast.File]("Name"), + ForStmt_Body: info[*ast.ForStmt]("Body"), + ForStmt_Cond: info[*ast.ForStmt]("Cond"), 
+ ForStmt_Init: info[*ast.ForStmt]("Init"), + ForStmt_Post: info[*ast.ForStmt]("Post"), + FuncDecl_Body: info[*ast.FuncDecl]("Body"), + FuncDecl_Doc: info[*ast.FuncDecl]("Doc"), + FuncDecl_Name: info[*ast.FuncDecl]("Name"), + FuncDecl_Recv: info[*ast.FuncDecl]("Recv"), + FuncDecl_Type: info[*ast.FuncDecl]("Type"), + FuncLit_Body: info[*ast.FuncLit]("Body"), + FuncLit_Type: info[*ast.FuncLit]("Type"), + FuncType_Params: info[*ast.FuncType]("Params"), + FuncType_Results: info[*ast.FuncType]("Results"), + FuncType_TypeParams: info[*ast.FuncType]("TypeParams"), + GenDecl_Doc: info[*ast.GenDecl]("Doc"), + GenDecl_Specs: info[*ast.GenDecl]("Specs"), + GoStmt_Call: info[*ast.GoStmt]("Call"), + IfStmt_Body: info[*ast.IfStmt]("Body"), + IfStmt_Cond: info[*ast.IfStmt]("Cond"), + IfStmt_Else: info[*ast.IfStmt]("Else"), + IfStmt_Init: info[*ast.IfStmt]("Init"), + ImportSpec_Comment: info[*ast.ImportSpec]("Comment"), + ImportSpec_Doc: info[*ast.ImportSpec]("Doc"), + ImportSpec_Name: info[*ast.ImportSpec]("Name"), + ImportSpec_Path: info[*ast.ImportSpec]("Path"), + IncDecStmt_X: info[*ast.IncDecStmt]("X"), + IndexExpr_Index: info[*ast.IndexExpr]("Index"), + IndexExpr_X: info[*ast.IndexExpr]("X"), + IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"), + IndexListExpr_X: info[*ast.IndexListExpr]("X"), + InterfaceType_Methods: info[*ast.InterfaceType]("Methods"), + KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"), + KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"), + LabeledStmt_Label: info[*ast.LabeledStmt]("Label"), + LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"), + MapType_Key: info[*ast.MapType]("Key"), + MapType_Value: info[*ast.MapType]("Value"), + ParenExpr_X: info[*ast.ParenExpr]("X"), + RangeStmt_Body: info[*ast.RangeStmt]("Body"), + RangeStmt_Key: info[*ast.RangeStmt]("Key"), + RangeStmt_Value: info[*ast.RangeStmt]("Value"), + RangeStmt_X: info[*ast.RangeStmt]("X"), + ReturnStmt_Results: info[*ast.ReturnStmt]("Results"), + SelectStmt_Body: 
info[*ast.SelectStmt]("Body"), + SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"), + SelectorExpr_X: info[*ast.SelectorExpr]("X"), + SendStmt_Chan: info[*ast.SendStmt]("Chan"), + SendStmt_Value: info[*ast.SendStmt]("Value"), + SliceExpr_High: info[*ast.SliceExpr]("High"), + SliceExpr_Low: info[*ast.SliceExpr]("Low"), + SliceExpr_Max: info[*ast.SliceExpr]("Max"), + SliceExpr_X: info[*ast.SliceExpr]("X"), + StarExpr_X: info[*ast.StarExpr]("X"), + StructType_Fields: info[*ast.StructType]("Fields"), + SwitchStmt_Body: info[*ast.SwitchStmt]("Body"), + SwitchStmt_Init: info[*ast.SwitchStmt]("Init"), + SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"), + TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"), + TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"), + TypeSpec_Comment: info[*ast.TypeSpec]("Comment"), + TypeSpec_Doc: info[*ast.TypeSpec]("Doc"), + TypeSpec_Name: info[*ast.TypeSpec]("Name"), + TypeSpec_Type: info[*ast.TypeSpec]("Type"), + TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"), + TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"), + TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"), + TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"), + UnaryExpr_X: info[*ast.UnaryExpr]("X"), + ValueSpec_Comment: info[*ast.ValueSpec]("Comment"), + ValueSpec_Doc: info[*ast.ValueSpec]("Doc"), + ValueSpec_Names: info[*ast.ValueSpec]("Names"), + ValueSpec_Type: info[*ast.ValueSpec]("Type"), + ValueSpec_Values: info[*ast.ValueSpec]("Values"), +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c04e231d..b0c4b331 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,17 @@ +# code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2 +## explicit +code.cloudfoundry.org/bbs +code.cloudfoundry.org/bbs/encryption +code.cloudfoundry.org/bbs/events +code.cloudfoundry.org/bbs/format +code.cloudfoundry.org/bbs/models +code.cloudfoundry.org/bbs/trace # code.cloudfoundry.org/bytefmt v0.34.0 ## explicit; go 1.23.0 code.cloudfoundry.org/bytefmt 
+# code.cloudfoundry.org/cfhttp/v2 v2.44.0 +## explicit; go 1.23.0 +code.cloudfoundry.org/cfhttp/v2 # code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 ## explicit code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking @@ -75,6 +86,12 @@ code.cloudfoundry.org/jsonry code.cloudfoundry.org/jsonry/internal/errorcontext code.cloudfoundry.org/jsonry/internal/path code.cloudfoundry.org/jsonry/internal/tree +# code.cloudfoundry.org/lager/v3 v3.0.3 +## explicit; go 1.19 +code.cloudfoundry.org/lager/v3 +code.cloudfoundry.org/lager/v3/internal/truncate +# code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 +## explicit # code.cloudfoundry.org/tlsconfig v0.22.0 ## explicit; go 1.23.0 code.cloudfoundry.org/tlsconfig @@ -144,9 +161,18 @@ github.com/fxamacker/cbor/v2 # github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr +# github.com/go-sql-driver/mysql v1.9.2 +## explicit; go 1.21.0 +# github.com/go-task/slim-sprig/v3 v3.0.0 +## explicit; go 1.20 +github.com/go-task/slim-sprig/v3 +# github.com/go-test/deep v1.1.1 +## explicit; go 1.16 # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/google/go-cmp v0.7.0 ## explicit; go 1.21 @@ -159,11 +185,16 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource +# github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 +## explicit; go 1.23.0 +github.com/google/pprof/profile # github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities +# github.com/jackc/pgx/v5 v5.7.4 +## explicit; go 1.21 # github.com/jessevdk/go-flags v1.6.1 ## explicit; go 1.20 
github.com/jessevdk/go-flags @@ -224,6 +255,24 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types +# github.com/onsi/ginkgo/v2 v2.23.4 +## explicit; go 1.23.0 +github.com/onsi/ginkgo/v2/config +github.com/onsi/ginkgo/v2/formatter +github.com/onsi/ginkgo/v2/ginkgo +github.com/onsi/ginkgo/v2/ginkgo/build +github.com/onsi/ginkgo/v2/ginkgo/command +github.com/onsi/ginkgo/v2/ginkgo/generators +github.com/onsi/ginkgo/v2/ginkgo/internal +github.com/onsi/ginkgo/v2/ginkgo/labels +github.com/onsi/ginkgo/v2/ginkgo/outline +github.com/onsi/ginkgo/v2/ginkgo/run +github.com/onsi/ginkgo/v2/ginkgo/unfocus +github.com/onsi/ginkgo/v2/ginkgo/watch +github.com/onsi/ginkgo/v2/internal/interrupt_handler +github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/reporters +github.com/onsi/ginkgo/v2/types # github.com/onsi/gomega v1.37.0 ## explicit; go 1.23.0 github.com/onsi/gomega @@ -238,6 +287,10 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types +# github.com/openzipkin/zipkin-go v0.4.2 +## explicit; go 1.18 +github.com/openzipkin/zipkin-go/idgenerator +github.com/openzipkin/zipkin-go/model # github.com/prometheus/client_golang v1.22.0 ## explicit; go 1.22 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil @@ -279,12 +332,21 @@ github.com/tedsuo/rata ## explicit; go 1.12 github.com/vito/go-interact/interact github.com/vito/go-interact/interact/terminal +# github.com/vito/go-sse v1.1.3 +## explicit; go 1.24 +github.com/vito/go-sse/sse # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 # github.com/xhit/go-str2duration/v2 v2.1.0 ## explicit; go 1.13 github.com/xhit/go-str2duration/v2 +# go.uber.org/automaxprocs v1.6.0 +## 
explicit; go 1.20 +go.uber.org/automaxprocs +go.uber.org/automaxprocs/internal/cgroups +go.uber.org/automaxprocs/internal/runtime +go.uber.org/automaxprocs/maxprocs # golang.org/x/crypto v0.38.0 ## explicit; go 1.23.0 golang.org/x/crypto/blowfish @@ -347,6 +409,11 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.11.0 ## explicit; go 1.23.0 golang.org/x/time/rate +# golang.org/x/tools v0.32.0 +## explicit; go 1.23.0 +golang.org/x/tools/cover +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/internal/astutil/edge # google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api From 1f78e351bfe1a66a4580dd9714f2b23ed30c6455 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:34:46 +0200 Subject: [PATCH 02/13] remove usage of /v2/spaces/:space_guid/summary --- collectors/applications.go | 66 ++++++++++++++++++++----------------- fetcher/fetcher.go | 1 + fetcher/fetcher_handlers.go | 39 +++++++--------------- filters/filters.go | 7 ++-- main.go | 3 -- models/model.go | 7 ++-- 6 files changed, 55 insertions(+), 68 deletions(-) diff --git a/collectors/applications.go b/collectors/applications.go index b195c68e..3abc84b4 100644 --- a/collectors/applications.go +++ b/collectors/applications.go @@ -53,7 +53,7 @@ func NewApplicationsCollector( Help: "Buildpack used by an Application.", ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment}, }, - []string{"application_id", "application_name", "buildpack_name"}, + []string{"application_id", "application_name", "buildpack_name", "detected_buildpack"}, ) applicationInstancesMetric := prometheus.NewGaugeVec( @@ -242,30 +242,37 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m return fmt.Errorf("could not find org with guid '%s'", orgRel.GUID) } - appSum, ok := objs.AppSummaries[application.GUID] - if !ok { - return fmt.Errorf("could not find app summary 
with guid '%s'", application.GUID) - } - - // 1. - detectedBuildpack := appSum.DetectedBuildpack - if len(detectedBuildpack) == 0 { - detectedBuildpack = appSum.Buildpack - } - - // 2. - buildpack := appSum.Buildpack - if len(buildpack) == 0 { - buildpack = appSum.DetectedBuildpack + detectedBuildpack := "" + buildpack := "" + stackGUID := "" + for _, stack := range objs.Stacks { + if stack.Name == application.Lifecycle.Data.Stack { + stackGUID = stack.GUID + break + } } - - // 3. Use the droplet data for the buildpack metric - for _, bp := range application.Lifecycle.Data.Buildpacks { - c.applicationBuildpackMetric.WithLabelValues( - application.GUID, - application.Name, - bp, - ).Set(float64(1)) + if dropletGUID := application.Relationships[constant.RelationshipTypeCurrentDroplet].GUID; dropletGUID != "" { + if droplet, ok := objs.Droplets[dropletGUID]; ok { + // 1. + detectedBuildpack = droplet.Buildpacks[0].DetectOutput + // 2. + buildpack = droplet.Buildpacks[0].BuildpackName + if len(detectedBuildpack) == 0 { + detectedBuildpack = buildpack + } + if len(buildpack) == 0 { + buildpack = detectedBuildpack + } + // 3.Use the droplet data for the buildpack metric + for _, bp := range droplet.Buildpacks { + c.applicationBuildpackMetric.WithLabelValues( + application.GUID, + application.Name, + bp.BuildpackName, + bp.DetectOutput, + ).Set(float64(1)) + } + } } c.applicationInfoMetric.WithLabelValues( @@ -277,7 +284,7 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m organization.Name, space.GUID, space.Name, - appSum.StackID, + stackGUID, string(application.State), ).Set(float64(1)) @@ -291,15 +298,14 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m string(application.State), ).Set(float64(process.Instances.Value)) - runningInstances := appSum.RunningInstances // Use bbs data if available + runningInstances := 0 if len(objs.ProcessActualLRPs) > 0 { - runningsInstances := 0 - lrps, ok := 
objs.ProcessActualLRPs[process.GUID] + LRPs, ok := objs.ProcessActualLRPs[process.GUID] if ok { - for _, lrp := range lrps { + for _, lrp := range LRPs { if lrp.State == "RUNNING" { - runningsInstances++ + runningInstances++ } } } diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index d1a86507..5e1d9aa5 100644 --- a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -67,6 +67,7 @@ func (c *Fetcher) workInit() { c.worker.PushIf("spaces", c.fetchSpaces, filters.Applications, filters.Spaces) c.worker.PushIf("space_quotas", c.fetchSpaceQuotas, filters.Spaces) c.worker.PushIf("applications", c.fetchApplications, filters.Applications) + c.worker.PushIf("droplets", c.fetchDroplets, filters.Droplets) c.worker.PushIf("domains", c.fetchDomains, filters.Domains) c.worker.PushIf("process", c.fetchProcesses, filters.Applications) c.worker.PushIf("routes", c.fetchRoutes, filters.Routes) diff --git a/fetcher/fetcher_handlers.go b/fetcher/fetcher_handlers.go index c5bdad2d..be7a0787 100644 --- a/fetcher/fetcher_handlers.go +++ b/fetcher/fetcher_handlers.go @@ -1,7 +1,6 @@ package fetcher import ( - "fmt" "regexp" "time" @@ -9,7 +8,6 @@ import ( "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" "code.cloudfoundry.org/cli/resources" - "github.com/cloudfoundry/cf_exporter/filters" "github.com/cloudfoundry/cf_exporter/models" log "github.com/sirupsen/logrus" ) @@ -74,33 +72,10 @@ func (c *Fetcher) fetchOrgQuotas(session *SessionExt, _ *BBSClient, entry *model // summary fetching attempt. 
See cloudfoundry/cf_exporter#85 func (c *Fetcher) fetchSpaces(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { spaces, _, _, err := session.V3().GetSpaces(LargeQuery) - if err != nil { - return err - } - - loadIndex(entry.Spaces, spaces, func(r resources.Space) string { return r.GUID }) - total := len(spaces) - for idx := 0; idx < total; idx++ { - space := spaces[idx] - name := fmt.Sprintf("space_summaries %04d/%04d (%s)", idx, total, space.GUID) - c.worker.PushIf(name, func(session *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { - spaceSum, err := session.GetSpaceSummary(space.GUID) - if err == nil { - c.Lock() - entry.SpaceSummaries[spaceSum.GUID] = *spaceSum - for _, app := range spaceSum.Apps { - entry.AppSummaries[app.GUID] = app - } - c.Unlock() - } else { - log.WithError(err).Warnf("could not fetch space '%s' summary", space.GUID) - } - // 1 - return nil - }, filters.Applications) + if err == nil { + loadIndex(entry.Spaces, spaces, func(r resources.Space) string { return r.GUID }) } - - return nil + return err } func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { @@ -169,6 +144,14 @@ func (c *Fetcher) fetchSecurityGroups(session *SessionExt, _ *BBSClient, entry * return err } +func (c *Fetcher) fetchDroplets(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { + droplets, _, err := session.V3().GetDroplets(LargeQuery) + if err == nil { + loadIndex(entry.Droplets, droplets, func(r resources.Droplet) string { return r.GUID }) + } + return err +} + func (c *Fetcher) fetchStacks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { stacks, _, err := session.V3().GetStacks(LargeQuery) if err == nil { diff --git a/filters/filters.go b/filters/filters.go index b4d3c247..4969129d 100644 --- a/filters/filters.go +++ b/filters/filters.go @@ -7,6 +7,7 @@ import ( const ( Applications = "applications" + Droplets = "droplets" Buildpacks = "buildpacks" Domains 
= "domains" Events = "events" @@ -22,12 +23,12 @@ const ( Spaces = "spaces" Stacks = "stacks" Tasks = "tasks" - InstancesRunning = "instances_running" ) var ( All = []string{ Applications, + Droplets, Buildpacks, Domains, Events, @@ -54,6 +55,7 @@ func NewFilter(active ...string) (*Filter, error) { filter := &Filter{ activated: map[string]bool{ Applications: true, + Droplets: true, Buildpacks: true, Domains: true, IsolationSegments: true, @@ -69,7 +71,6 @@ func NewFilter(active ...string) (*Filter, error) { Stacks: true, Tasks: false, Events: false, - InstancesRunning: false, }, } @@ -86,6 +87,7 @@ func (f *Filter) setActive(active []string) error { // override default states with all disabled f.activated = map[string]bool{ Applications: false, + Droplets: false, Buildpacks: false, Domains: false, IsolationSegments: false, @@ -101,7 +103,6 @@ func (f *Filter) setActive(active []string) error { Stacks: false, Tasks: false, Events: false, - InstancesRunning: false, } // enable only given filters diff --git a/main.go b/main.go index d620ba7b..f522316c 100644 --- a/main.go +++ b/main.go @@ -195,9 +195,6 @@ func main() { SkipCertVerify: *bbsSkipSSLValidation, } - log.Infof("cfConfig: %+v", cfConfig) - log.Infof("bbsConfig: %+v", bbsConfig) - active := []string{} if len(*filterCollectors) != 0 { active = strings.Split(*filterCollectors, ",") diff --git a/models/model.go b/models/model.go index c7093fbe..a40a80ac 100644 --- a/models/model.go +++ b/models/model.go @@ -17,6 +17,7 @@ type CFObjects struct { Spaces map[string]resources.Space `json:"spaces"` SpaceQuotas map[string]Quota `json:"space_quotas"` Apps map[string]Application `json:"apps"` + Droplets map[string]resources.Droplet `json:"droplets"` Processes map[string]resources.Process `json:"process"` Tasks map[string]Task `json:"tasks"` Routes map[string]resources.Route `json:"routes"` @@ -26,13 +27,12 @@ type CFObjects struct { SecurityGroups map[string]resources.SecurityGroup `json:"security_groups"` Stacks 
map[string]resources.Stack `json:"stacks"` Buildpacks map[string]resources.Buildpack `json:"buildpacks"` + BuildpacksByName map[string]resources.Buildpack `json:"builpacks_by_name"` Domains map[string]resources.Domain `json:"domains"` ServiceBrokers map[string]resources.ServiceBroker `json:"service_brokers"` ServiceOfferings map[string]resources.ServiceOffering `json:"service_offerings"` ServicePlans map[string]resources.ServicePlan `json:"service_plans"` ServiceBindings map[string]resources.ServiceCredentialBinding `json:"service_bindings"` - SpaceSummaries map[string]SpaceSummary `json:"space_summaries"` - AppSummaries map[string]AppSummary `json:"app_summaries"` AppProcesses map[string][]resources.Process `json:"app_processes"` ProcessActualLRPs map[string][]*models.ActualLRP `json:"process_actual_lrps"` Events map[string]Event `json:"events"` @@ -159,6 +159,7 @@ func NewCFObjects() *CFObjects { Spaces: map[string]resources.Space{}, SpaceQuotas: map[string]Quota{}, Apps: map[string]Application{}, + Droplets: map[string]resources.Droplet{}, Processes: map[string]resources.Process{}, Tasks: map[string]Task{}, Routes: map[string]resources.Route{}, @@ -173,8 +174,6 @@ func NewCFObjects() *CFObjects { ServiceOfferings: map[string]resources.ServiceOffering{}, ServicePlans: map[string]resources.ServicePlan{}, ServiceBindings: map[string]resources.ServiceCredentialBinding{}, - SpaceSummaries: map[string]SpaceSummary{}, - AppSummaries: map[string]AppSummary{}, AppProcesses: map[string][]resources.Process{}, ProcessActualLRPs: map[string][]*models.ActualLRP{}, Users: map[string]resources.User{}, From 2ecf884eaa87b87ae085a0b0c5ed973d8f34deb7 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:35:04 +0200 Subject: [PATCH 03/13] upgrade cli to use new fields for apps and droplets --- fetcher/fetcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index 5e1d9aa5..7a791f09 100644 --- 
a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -85,7 +85,7 @@ func (c *Fetcher) workInit() { c.worker.PushIf("service_route_bindings", c.fetchServiceRouteBindings, filters.ServiceRouteBindings) c.worker.PushIf("users", c.fetchUsers, filters.Events) c.worker.PushIf("events", c.fetchEvents, filters.Events) - c.worker.PushIf("actual_lrps", c.fetchActualLRPs) + c.worker.Push("actual_lrps", c.fetchActualLRPs) } func (c *Fetcher) fetch() *models.CFObjects { From d7e78262511e4596f27d49092f22715522c6dc71 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:35:27 +0200 Subject: [PATCH 04/13] add filters and complete readme --- README.md | 8 ++++++++ fetcher/fetcher.go | 2 +- fetcher/fetcher_test.go | 24 ++++++++++++++++++++++++ filters/filters.go | 4 ++++ main.go | 2 +- 5 files changed, 38 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 21c90b59..bcd03427 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,14 @@ usage: cf_exporter --cf.api_url=CF.API_URL --cf.deployment-name=CF.DEPLOYMENT-NA Flags: -h, --help Show context-sensitive help (also try --help-long and --help-man). 
+ --bbs.api_url=BBS.API_URL BBS API URL ($CF_EXPORTER_BBS_API_URL) + --bbs.ca_file=BBS.CA_FILE BBS CA File ($CF_EXPORTER_BBS_CA_FILE) + --bbs.cert_file=BBS.CERT_FILE + BBS Cert File ($CF_EXPORTER_BBS_CERT_FILE) + --bbs.key_file=BBS.KEY_FILE + BBS Key File ($CF_EXPORTER_BBS_KEY_FILE) + --bbs.skip_ssl_verify Disable SSL Verify for BBS ($CF_EXPORTER_BBS_SKIP_SSL_VERIFY) + --bbs.timeout=5 BBS API Timeout ($CF_EXPORTER_BBS_TIMEOUT) --cf.api_url=CF.API_URL Cloud Foundry API URL ($CF_EXPORTER_CF_API_URL) --cf.username=CF.USERNAME Cloud Foundry Username ($CF_EXPORTER_CF_USERNAME) --cf.password=CF.PASSWORD Cloud Foundry Password ($CF_EXPORTER_CF_PASSWORD) diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index 7a791f09..d709937a 100644 --- a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -85,7 +85,7 @@ func (c *Fetcher) workInit() { c.worker.PushIf("service_route_bindings", c.fetchServiceRouteBindings, filters.ServiceRouteBindings) c.worker.PushIf("users", c.fetchUsers, filters.Events) c.worker.PushIf("events", c.fetchEvents, filters.Events) - c.worker.Push("actual_lrps", c.fetchActualLRPs) + c.worker.PushIf("actual_lrps", c.fetchActualLRPs, filters.ActualLRPs) } func (c *Fetcher) fetch() *models.CFObjects { diff --git a/fetcher/fetcher_test.go b/fetcher/fetcher_test.go index 844b9003..a2e54d15 100644 --- a/fetcher/fetcher_test.go +++ b/fetcher/fetcher_test.go @@ -40,6 +40,7 @@ var _ = ginkgo.Describe("Fetcher", func() { "spaces", "space_quotas", "applications", + "droplets", "domains", "process", "routes", @@ -54,6 +55,7 @@ var _ = ginkgo.Describe("Fetcher", func() { "service_bindings", "service_route_bindings", "segments", + "actual_lrps", } }) ginkgo.It("plans all jobs", func() { @@ -71,6 +73,7 @@ var _ = ginkgo.Describe("Fetcher", func() { "spaces", "space_quotas", "applications", + "droplets", "domains", "process", "routes", @@ -88,6 +91,7 @@ var _ = ginkgo.Describe("Fetcher", func() { "segments", "users", "events", + "actual_lrps", } }) ginkgo.It("plans all jobs", 
func() { @@ -225,5 +229,25 @@ var _ = ginkgo.Describe("Fetcher", func() { }) }) + ginkgo.When("droplets filter is set", func() { + ginkgo.BeforeEach(func() { + active = []string{filters.Droplets} + expected = []string{"info", "droplets"} + }) + ginkgo.It("plans only specific jobs", func() { + gomega.Ω(jobs).Should(gomega.ConsistOf(expected)) + }) + }) + + ginkgo.When("actual_lrps filter is set", func() { + ginkgo.BeforeEach(func() { + active = []string{filters.ActualLRPs} + expected = []string{"info", "actual_lrps"} + }) + ginkgo.It("plans only specific jobs", func() { + gomega.Ω(jobs).Should(gomega.ConsistOf(expected)) + }) + }) + }) }) diff --git a/filters/filters.go b/filters/filters.go index 4969129d..48a4cce4 100644 --- a/filters/filters.go +++ b/filters/filters.go @@ -6,6 +6,7 @@ import ( ) const ( + ActualLRPs = "actual_lrps" Applications = "applications" Droplets = "droplets" Buildpacks = "buildpacks" @@ -27,6 +28,7 @@ const ( var ( All = []string{ + ActualLRPs, Applications, Droplets, Buildpacks, @@ -54,6 +56,7 @@ type Filter struct { func NewFilter(active ...string) (*Filter, error) { filter := &Filter{ activated: map[string]bool{ + ActualLRPs: true, Applications: true, Droplets: true, Buildpacks: true, @@ -86,6 +89,7 @@ func NewFilter(active ...string) (*Filter, error) { func (f *Filter) setActive(active []string) error { // override default states with all disabled f.activated = map[string]bool{ + ActualLRPs: false, Applications: false, Droplets: false, Buildpacks: false, diff --git a/main.go b/main.go index f522316c..1fd69f07 100644 --- a/main.go +++ b/main.go @@ -67,7 +67,7 @@ var ( ).Envar("CF_EXPORTER_CF_DEPLOYMENT_NAME").Required().String() filterCollectors = kingpin.Flag( - "filter.collectors", "Comma separated collectors to filter (Applications,Buildpacks,Events,IsolationSegments,Organizations,Routes,SecurityGroups,ServiceBindings,ServiceInstances,ServicePlans,Services,Spaces,Stacks,Tasks,ActualLRPs). 
If not set, all collectors except Events and Tasks are enabled ($CF_EXPORTER_FILTER_COLLECTORS)", + "filter.collectors", "Comma separated collectors to filter (ActualLRPs,Applications,Buildpacks,Events,IsolationSegments,Organizations,Routes,SecurityGroups,ServiceBindings,ServiceInstances,ServicePlans,Services,Spaces,Stacks,Tasks,ActualLRPs). If not set, all collectors except Events and Tasks are enabled ($CF_EXPORTER_FILTER_COLLECTORS)", ).Envar("CF_EXPORTER_FILTER_COLLECTORS").Default("").String() metricsNamespace = kingpin.Flag( From ba76d41852994242981ffd940f7e1ccf15a79758 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:35:49 +0200 Subject: [PATCH 05/13] decrease complexity of collecotrs/applications.go --- collectors/applications.go | 57 +++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/collectors/applications.go b/collectors/applications.go index 3abc84b4..eceb7888 100644 --- a/collectors/applications.go +++ b/collectors/applications.go @@ -6,6 +6,8 @@ import ( "fmt" "time" + "code.cloudfoundry.org/cli/resources" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" "github.com/cloudfoundry/cf_exporter/models" "github.com/prometheus/client_golang/prometheus" @@ -242,8 +244,6 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m return fmt.Errorf("could not find org with guid '%s'", orgRel.GUID) } - detectedBuildpack := "" - buildpack := "" stackGUID := "" for _, stack := range objs.Stacks { if stack.Name == application.Lifecycle.Data.Stack { @@ -251,29 +251,7 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m break } } - if dropletGUID := application.Relationships[constant.RelationshipTypeCurrentDroplet].GUID; dropletGUID != "" { - if droplet, ok := objs.Droplets[dropletGUID]; ok { - // 1. - detectedBuildpack = droplet.Buildpacks[0].DetectOutput - // 2. 
- buildpack = droplet.Buildpacks[0].BuildpackName - if len(detectedBuildpack) == 0 { - detectedBuildpack = buildpack - } - if len(buildpack) == 0 { - buildpack = detectedBuildpack - } - // 3.Use the droplet data for the buildpack metric - for _, bp := range droplet.Buildpacks { - c.applicationBuildpackMetric.WithLabelValues( - application.GUID, - application.Name, - bp.BuildpackName, - bp.DetectOutput, - ).Set(float64(1)) - } - } - } + detectedBuildpack, buildpack := c.collectAppBuildpacks(application, objs.Droplets) c.applicationInfoMetric.WithLabelValues( application.GUID, @@ -340,6 +318,35 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m return nil } +func (c ApplicationsCollector) collectAppBuildpacks(application models.Application, droplets map[string]resources.Droplet) (detectedBuildpack string, buildpack string) { + detectedBuildpack = "" + buildpack = "" + if dropletGUID := application.Relationships[constant.RelationshipTypeCurrentDroplet].GUID; dropletGUID != "" { + if droplet, ok := droplets[dropletGUID]; ok { + // 1. + detectedBuildpack = droplet.Buildpacks[0].DetectOutput + // 2. + buildpack = droplet.Buildpacks[0].BuildpackName + if len(detectedBuildpack) == 0 { + detectedBuildpack = buildpack + } + if len(buildpack) == 0 { + buildpack = detectedBuildpack + } + // 3.Use the droplet data for the buildpack metric + for _, bp := range droplet.Buildpacks { + c.applicationBuildpackMetric.WithLabelValues( + application.GUID, + application.Name, + bp.BuildpackName, + bp.DetectOutput, + ).Set(float64(1)) + } + } + } + return detectedBuildpack, buildpack +} + // reportApplicationsMetrics // 1. 
continue processing application list upon error func (c ApplicationsCollector) reportApplicationsMetrics(objs *models.CFObjects, ch chan<- prometheus.Metric) error { From 04e5ad31474f6345d8b1ccf7b70801e3a1d9300c Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:36:33 +0200 Subject: [PATCH 06/13] turn back on calls to /v2/spaces/:space_guid/summary if bbs is not configured or unreachable --- collectors/applications.go | 12 +++++++----- fetcher/bbs_client.go | 17 ++++++++++++++++- fetcher/fetcher.go | 15 ++++++++++----- fetcher/fetcher_handlers.go | 25 ++++++++++++++++++++++++- filters/filters.go | 8 ++++++++ models/model.go | 2 ++ 6 files changed, 67 insertions(+), 12 deletions(-) diff --git a/collectors/applications.go b/collectors/applications.go index eceb7888..a1bba4b0 100644 --- a/collectors/applications.go +++ b/collectors/applications.go @@ -6,8 +6,6 @@ import ( "fmt" "time" - "code.cloudfoundry.org/cli/resources" - "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" "github.com/cloudfoundry/cf_exporter/models" "github.com/prometheus/client_golang/prometheus" @@ -251,7 +249,7 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m break } } - detectedBuildpack, buildpack := c.collectAppBuildpacks(application, objs.Droplets) + detectedBuildpack, buildpack := c.collectAppBuildpacks(application, objs) c.applicationInfoMetric.WithLabelValues( application.GUID, @@ -287,6 +285,10 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m } } } + } else if len(objs.AppSummaries) > 0 { + if appSummary, ok := objs.AppSummaries[application.GUID]; ok { + runningInstances = appSummary.RunningInstances + } } c.applicationInstancesRunningMetric.WithLabelValues( application.GUID, @@ -318,11 +320,11 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m return nil } -func (c ApplicationsCollector) collectAppBuildpacks(application models.Application, droplets 
map[string]resources.Droplet) (detectedBuildpack string, buildpack string) { +func (c ApplicationsCollector) collectAppBuildpacks(application models.Application, objs *models.CFObjects) (detectedBuildpack string, buildpack string) { detectedBuildpack = "" buildpack = "" if dropletGUID := application.Relationships[constant.RelationshipTypeCurrentDroplet].GUID; dropletGUID != "" { - if droplet, ok := droplets[dropletGUID]; ok { + if droplet, ok := objs.Droplets[dropletGUID]; ok { // 1. detectedBuildpack = droplet.Buildpacks[0].DetectOutput // 2. diff --git a/fetcher/bbs_client.go b/fetcher/bbs_client.go index bf96b2da..0bd25fe7 100644 --- a/fetcher/bbs_client.go +++ b/fetcher/bbs_client.go @@ -1,6 +1,7 @@ package fetcher import ( + "fmt" "strings" "time" @@ -51,7 +52,13 @@ func NewBBSClient(config *BBSConfig) (*BBSClient, error) { bbsClientConfig.MaxIdleConnsPerHost = maxIdleConnsPerHost } bbsClient.client, err = bbs.NewClientWithConfig(bbsClientConfig) - return &bbsClient, err + if err != nil { + return nil, err + } + if bbsClient.client.Ping(bbsClient.logger, trace.GenerateTraceID()) { + return &bbsClient, nil + } + return nil, fmt.Errorf("failed to ping BBS") } func (b *BBSClient) GetActualLRPs() ([]*models.ActualLRP, error) { @@ -60,3 +67,11 @@ func (b *BBSClient) GetActualLRPs() ([]*models.ActualLRP, error) { return actualLRPs, err } + +func (b *BBSClient) TestConnection() error { + traceID := trace.GenerateTraceID() + if b.client.Ping(b.logger, traceID) { + return nil + } + return fmt.Errorf("failed to ping BBS") +} diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index d709937a..21be85ba 100644 --- a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -39,12 +39,14 @@ type Fetcher struct { cfConfig *CFConfig bbsConfig *BBSConfig worker *Worker + filters *filters.Filter } func NewFetcher(threads int, config *CFConfig, bbsConfig *BBSConfig, filter *filters.Filter) *Fetcher { return &Fetcher{ cfConfig: config, bbsConfig: bbsConfig, + filters: filter, worker: 
NewWorker(threads, filter), } } @@ -97,11 +99,14 @@ func (c *Fetcher) fetch() *models.CFObjects { result.Error = err return result } - bbs, err := NewBBSClient(c.bbsConfig) - if err != nil { - log.WithError(err).Error("unable to initialize bbs client") - result.Error = err - return result + + var bbs *BBSClient + if c.bbsConfig.URL != "" { + bbs, err = NewBBSClient(c.bbsConfig) + if err != nil { + log.WithError(err).Error("unable to initialize bbs client") + c.filters.Disable([]string{filters.ActualLRPs}) + } } c.workInit() diff --git a/fetcher/fetcher_handlers.go b/fetcher/fetcher_handlers.go index be7a0787..c73b3625 100644 --- a/fetcher/fetcher_handlers.go +++ b/fetcher/fetcher_handlers.go @@ -1,9 +1,12 @@ package fetcher import ( + "fmt" "regexp" "time" + "github.com/cloudfoundry/cf_exporter/filters" + models2 "code.cloudfoundry.org/bbs/models" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" @@ -70,10 +73,30 @@ func (c *Fetcher) fetchOrgQuotas(session *SessionExt, _ *BBSClient, entry *model // fetchSpaces // 1. silent fail because space may have been deleted between listing and // summary fetching attempt. 
See cloudfoundry/cf_exporter#85 -func (c *Fetcher) fetchSpaces(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { +func (c *Fetcher) fetchSpaces(session *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { spaces, _, _, err := session.V3().GetSpaces(LargeQuery) if err == nil { loadIndex(entry.Spaces, spaces, func(r resources.Space) string { return r.GUID }) + if bbs == nil { + total := len(spaces) + for idx := 0; idx < total; idx++ { + space := spaces[idx] + name := fmt.Sprintf("space_summaries %04d/%04d (%s)", idx, total, space.GUID) + c.worker.PushIf(name, func(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { + spaceSum, err := session.GetSpaceSummary(space.GUID) + if err == nil { + c.Lock() + for _, app := range spaceSum.Apps { + entry.AppSummaries[app.GUID] = app + } + c.Unlock() + } else { + log.WithError(err).Warnf("could not fetch space '%s' summary", space.GUID) + } + return nil + }, filters.Applications) + } + } } return err } diff --git a/filters/filters.go b/filters/filters.go index 48a4cce4..dafeccc7 100644 --- a/filters/filters.go +++ b/filters/filters.go @@ -86,6 +86,14 @@ func NewFilter(active ...string) (*Filter, error) { return filter, nil } +func (f *Filter) Disable(deactivate []string) { + for _, val := range deactivate { + if _, ok := f.activated[val]; ok { + f.activated[val] = false + } + } +} + func (f *Filter) setActive(active []string) error { // override default states with all disabled f.activated = map[string]bool{ diff --git a/models/model.go b/models/model.go index a40a80ac..6c46a6ae 100644 --- a/models/model.go +++ b/models/model.go @@ -33,6 +33,7 @@ type CFObjects struct { ServiceOfferings map[string]resources.ServiceOffering `json:"service_offerings"` ServicePlans map[string]resources.ServicePlan `json:"service_plans"` ServiceBindings map[string]resources.ServiceCredentialBinding `json:"service_bindings"` + AppSummaries map[string]AppSummary `json:"app_summaries"` AppProcesses 
map[string][]resources.Process `json:"app_processes"` ProcessActualLRPs map[string][]*models.ActualLRP `json:"process_actual_lrps"` Events map[string]Event `json:"events"` @@ -174,6 +175,7 @@ func NewCFObjects() *CFObjects { ServiceOfferings: map[string]resources.ServiceOffering{}, ServicePlans: map[string]resources.ServicePlan{}, ServiceBindings: map[string]resources.ServiceCredentialBinding{}, + AppSummaries: map[string]AppSummary{}, AppProcesses: map[string][]resources.Process{}, ProcessActualLRPs: map[string][]*models.ActualLRP{}, Users: map[string]resources.User{}, From 48d43fb425a996cfb9517fa300dd45ebc8487d99 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:40:34 +0200 Subject: [PATCH 07/13] bump cloudfoundry/cli to v8.11.0 --- go.mod | 6 +- go.sum | 20 +-- .../actor/actionerror/route_option_error.go | 22 ++++ .../actionerror/route_option_support_error.go | 12 ++ .../cli/actor/v7action/application.go | 18 ++- .../cli/actor/v7action/application_summary.go | 22 ++++ .../actor/v7action/cloud_controller_client.go | 6 +- .../cli/actor/v7action/deployment.go | 16 ++- .../process_readiness_health_check.go | 72 +++++++++++ .../cli/actor/v7action/revisions.go | 14 +++ .../cli/actor/v7action/route.go | 8 +- .../cli/api/cloudcontroller/ccv3/client.go | 2 +- .../ccv3/constant/application.go | 3 + .../ccv3/constant/deployment.go | 42 ++----- .../ccv3/constant/deployment_strategy.go | 3 + .../ccv3/constant/relationships.go | 3 + .../api/cloudcontroller/ccv3/deployment.go | 27 ++-- .../ccv3/included_resources.go | 11 ++ .../ccv3/internal/api_routes.go | 4 + .../cli/api/cloudcontroller/ccv3/paginate.go | 9 +- .../cli/api/cloudcontroller/ccv3/request.go | 10 +- .../cli/api/cloudcontroller/ccv3/revisions.go | 11 ++ .../cli/api/cloudcontroller/ccv3/route.go | 18 +++ .../cli/api/cloudcontroller/ccv3/task.go | 10 ++ .../ccversion/minimum_version.go | 3 + .../cli/api/uaa/error_converter.go | 2 +- .../cli/command/config.go | 1 + 
.../convert_to_translatable_error.go | 7 ++ .../code.cloudfoundry.org/cli/command/ui.go | 1 + .../cli/resources/application_resource.go | 31 ++++- .../cli/resources/deployment_resource.go | 69 ++++++++--- .../cli/resources/droplet_resource.go | 89 ++++++++++++++ .../cli/resources/metadata_resource.go | 3 +- .../cli/resources/options_resource.go | 24 ++++ .../cli/resources/process_resource.go | 62 ++++++++-- .../cli/resources/revision_resource.go | 16 +-- .../cli/resources/route_resource.go | 30 ++++- .../cli/resources/task_resource.go | 7 ++ .../cli/util/clissh/ssh.go | 13 +- .../cli/util/configv3/env.go | 17 +++ .../cli/util/configv3/load_config.go | 1 + .../cli/util/ui/request_logger_file_writer.go | 2 +- .../code.cloudfoundry.org/cli/util/ui/ui.go | 9 ++ .../cli/version/version.go | 8 +- .../code.cloudfoundry.org/lager/v3/README.md | 116 ++++-------------- .../code.cloudfoundry.org/lager/v3/handler.go | 6 +- .../lager/v3/internal/truncate/truncate.go | 14 ++- .../lager/v3/redacting_sink.go | 25 ++-- .../lager/v3/truncating_sink.go | 11 +- vendor/modules.txt | 12 +- 50 files changed, 680 insertions(+), 268 deletions(-) create mode 100644 vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_error.go create mode 100644 vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_support_error.go create mode 100644 vendor/code.cloudfoundry.org/cli/actor/v7action/process_readiness_health_check.go create mode 100644 vendor/code.cloudfoundry.org/cli/resources/options_resource.go diff --git a/go.mod b/go.mod index cbf2f22d..c4c8eee9 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.24.1 require ( code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2 - code.cloudfoundry.org/cli v0.0.0-20240609151540-b78406a9b0ce - code.cloudfoundry.org/lager/v3 v3.0.3 + code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7 + code.cloudfoundry.org/lager/v3 v3.27.0 github.com/alecthomas/kingpin/v2 v2.4.0 
github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.8.0 github.com/onsi/ginkgo v1.16.5 @@ -64,7 +64,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/onsi/ginkgo/v2 v2.23.4 // indirect - github.com/openzipkin/zipkin-go v0.4.2 // indirect + github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect diff --git a/go.sum b/go.sum index 57ee6d69..d1df5ff6 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ code.cloudfoundry.org/cfhttp/v2 v2.44.0 h1:SHb2oWRrMEigXQCfiXwmSkSZLZSZ+ua0AVbav code.cloudfoundry.org/cfhttp/v2 v2.44.0/go.mod h1:OYSxfFKC0HY7cbeXh2iQVcp4HnbucPBa0naTkOxzKZk= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 h1:Yc9r1p21kEpni9WlG4mwOZw87TB2QlyS9sAEebZ3+ak= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6/go.mod h1:u5FovqC5GGAEbFPz+IdjycDA+gIjhUwqxnu0vbHwVeM= -code.cloudfoundry.org/cli v0.0.0-20240609151540-b78406a9b0ce h1:Lg/u08txpuLjC7FQ4Q7DtQB2VZbmCXBbgyZbmBiwvI8= -code.cloudfoundry.org/cli v0.0.0-20240609151540-b78406a9b0ce/go.mod h1:I6bhATQKPAMQ5fdUxioHxt95aTfqJ3NibHnMIO2N0Kk= +code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7 h1:H3/+78JZSlecs6B76WkNsChGNIKB+Ns72deBEATGsEw= +code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7/go.mod h1:IdbcFbjs2waQvE3UvDLutHgUz8JiLqF3gW4Xeavs2rk= code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240520170503-e7ed9c7432a0 h1:AqfPzPhykvxSpxMkDdZMSJrCbxMQ0rugvDB3BuqD0DE= code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240520170503-e7ed9c7432a0/go.mod h1:R1EiyOAr7lW0l/YkZNqItUNZ01Q/dYUfbTn4X4Z+82M= code.cloudfoundry.org/clock v1.32.0 h1:GlGItvgiaemkPwxb+2/GNUfxiCs+1rjGiqTI8qZKJY8= @@ -22,8 +22,8 @@ code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTg code.cloudfoundry.org/gofileutils 
v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= code.cloudfoundry.org/jsonry v1.1.4 h1:P9N7IlH1/4aRCLcXLgLFj1hkcBmV7muijJzY+K6U4hE= code.cloudfoundry.org/jsonry v1.1.4/go.mod h1:6aKilShQP7w/Ez76h1El2/n9y2OkHuU56nKSBB9Gp0A= -code.cloudfoundry.org/lager/v3 v3.0.3 h1:/UTmadZfIaKuT/whEinSxK1mzRfNu1uPfvjFfGqiwzM= -code.cloudfoundry.org/lager/v3 v3.0.3/go.mod h1:Zn5q1SrIuuHjEUE7xerMKt3ztunrJQCZETAo7rV0CH8= +code.cloudfoundry.org/lager/v3 v3.27.0 h1:LZ/cxraneE84Rq7J7Z6lDA7x/nbe5iOM90yKJmVMhAU= +code.cloudfoundry.org/lager/v3 v3.27.0/go.mod h1:f8QnZ+7h8JXO/qujwTKVbuPj83gJuVJEXuet/1DTjjc= code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 h1:m6Zwwr6HjmdXS/EGwIhar0N6ExQZvmqYSC23MNE+5jc= code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201/go.mod h1:AwHLRkdXtttLXNB8RHgLfErJ2kKafH62AR2OClhy6xI= code.cloudfoundry.org/tlsconfig v0.22.0 h1:zgzDd4lp++vov8azKP1LdAOBEViPRwm1lg67FHJ4W7Q= @@ -64,10 +64,10 @@ github.com/cloudfoundry/bosh-utils v0.0.538 h1:5J99nCN0NynRK9lUcIAXeswW4pv2bkhyL github.com/cloudfoundry/bosh-utils v0.0.538/go.mod h1:ohCt2Llxkj8/sgG3zipUhpHdB2bYgwCcFTzkkRNDpo8= github.com/cppforlife/go-patch v0.2.0 h1:Y14MnCQjDlbw7WXT4k+u6DPAA9XnygN4BfrSpI/19RU= github.com/cppforlife/go-patch v0.2.0/go.mod h1:67a7aIi94FHDZdoeGSJRRFDp66l9MhaAG1yGxpUoFD8= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= -github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod 
h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -205,8 +205,8 @@ github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9 github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= -github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= -github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= +github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= +github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_error.go b/vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_error.go new file mode 100644 index 00000000..005633e5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_error.go @@ -0,0 +1,22 @@ +package actionerror + +import "fmt" + +// RouteOptionError is returned when a route option was specified in the wrong format +type RouteOptionError struct { + Name string + Host string + DomainName string + Path string +} + +func (e RouteOptionError) Error() string { + return fmt.Sprintf("Route option '%s' for route with host '%s', domain '%s', and path '%s' was 
specified incorrectly. Please use key-value pair format key=value.", e.Name, e.Host, e.DomainName, e.path()) +} + +func (e RouteOptionError) path() string { + if e.Path == "" { + return "/" + } + return e.Path +} diff --git a/vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_support_error.go b/vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_support_error.go new file mode 100644 index 00000000..9fcffbb4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/actor/actionerror/route_option_support_error.go @@ -0,0 +1,12 @@ +package actionerror + +import "fmt" + +// RouteOptionSupportError is returned when route options are not supported +type RouteOptionSupportError struct { + ErrorText string +} + +func (e RouteOptionSupportError) Error() string { + return fmt.Sprintf("Route option support: '%s'", e.ErrorText) +} diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/application.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/application.go index 68470090..e86ca01c 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/application.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/application.go @@ -162,6 +162,7 @@ func (actor Actor) CreateApplicationInSpace(app resources.Application, spaceGUID LifecycleBuildpacks: app.LifecycleBuildpacks, StackName: app.StackName, Name: app.Name, + Credentials: app.Credentials, SpaceGUID: spaceGUID, }) @@ -298,9 +299,9 @@ func (actor Actor) PollStart(app resources.Application, noWait bool, handleInsta } } -// PollStartForRolling polls a deploying application's processes until some are started. It does the same thing as PollStart, except it accounts for rolling deployments and whether +// PollStartForDeployment polls a deploying application's processes until some are started. It does the same thing as PollStart, except it accounts for rolling/canary deployments and whether // they have failed or been canceled during polling. 
-func (actor Actor) PollStartForRolling(app resources.Application, deploymentGUID string, noWait bool, handleInstanceDetails func(string)) (Warnings, error) { +func (actor Actor) PollStartForDeployment(app resources.Application, deploymentGUID string, noWait bool, handleInstanceDetails func(string)) (Warnings, error) { var ( deployment resources.Deployment processes []resources.Process @@ -321,7 +322,7 @@ func (actor Actor) PollStartForRolling(app resources.Application, deploymentGUID } return allWarnings, actionerror.StartupTimeoutError{Name: app.Name} case <-timer.C(): - if !isDeployed(deployment) { + if !isDeployProcessed(deployment) { ccDeployment, warnings, err := actor.getDeployment(deploymentGUID) allWarnings = append(allWarnings, warnings...) if err != nil { @@ -335,7 +336,7 @@ func (actor Actor) PollStartForRolling(app resources.Application, deploymentGUID } } - if noWait || isDeployed(deployment) { + if noWait || isDeployProcessed(deployment) { stopPolling, warnings, err := actor.PollProcesses(processes, handleInstanceDetails) allWarnings = append(allWarnings, warnings...) 
if stopPolling || err != nil { @@ -348,7 +349,12 @@ func (actor Actor) PollStartForRolling(app resources.Application, deploymentGUID } } -func isDeployed(d resources.Deployment) bool { +func isDeployProcessed(d resources.Deployment) bool { + if d.Strategy == constant.DeploymentStrategyCanary { + return d.StatusValue == constant.DeploymentStatusValueActive && d.StatusReason == constant.DeploymentStatusReasonPaused || + d.StatusValue == constant.DeploymentStatusValueFinalized && d.StatusReason == constant.DeploymentStatusReasonDeployed + } + return d.StatusValue == constant.DeploymentStatusValueFinalized && d.StatusReason == constant.DeploymentStatusReasonDeployed } @@ -439,7 +445,7 @@ func (actor Actor) getProcesses(deployment resources.Deployment, appGUID string, // if the deployment is deployed we know web are all running and PollProcesses will see those as stable // so just getting all processes is equivalent to just getting non-web ones and polling those - if isDeployed(deployment) { + if isDeployProcessed(deployment) { processes, warnings, err := actor.CloudControllerClient.GetApplicationProcesses(appGUID) if err != nil { return processes, Warnings(warnings), err diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/application_summary.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/application_summary.go index d33be9dd..78372053 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/application_summary.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/application_summary.go @@ -1,6 +1,8 @@ package v7action import ( + "errors" + "code.cloudfoundry.org/cli/actor/actionerror" "code.cloudfoundry.org/cli/api/cloudcontroller/ccerror" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" @@ -18,6 +20,7 @@ type ApplicationSummary struct { type DetailedApplicationSummary struct { ApplicationSummary CurrentDroplet resources.Droplet + Deployment resources.Deployment } func (a ApplicationSummary) GetIsolationSegmentName() (string, bool) { @@ -120,6 
+123,12 @@ func (actor Actor) GetDetailedAppSummary(appName, spaceGUID string, withObfuscat return DetailedApplicationSummary{}, allWarnings, err } + detailedSummary, warnings, err = actor.addDeployment(detailedSummary) + allWarnings = append(allWarnings, warnings...) + if err != nil { + return DetailedApplicationSummary{}, allWarnings, err + } + return detailedSummary, allWarnings, err } @@ -206,6 +215,19 @@ func (actor Actor) addDroplet(summary ApplicationSummary) (DetailedApplicationSu }, allWarnings, nil } +func (actor Actor) addDeployment(detailedSummary DetailedApplicationSummary) (DetailedApplicationSummary, Warnings, error) { + var allWarnings Warnings + + deployment, warnings, err := actor.GetLatestActiveDeploymentForApp(detailedSummary.GUID) + allWarnings = append(allWarnings, warnings...) + if err != nil && !errors.Is(err, actionerror.ActiveDeploymentNotFoundError{}) { + return DetailedApplicationSummary{}, allWarnings, err + } + + detailedSummary.Deployment = deployment + return detailedSummary, allWarnings, nil +} + func toAppGUIDs(apps []resources.Application) []string { guids := make([]string, len(apps)) diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go index 88849010..1350fd09 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go @@ -18,10 +18,10 @@ type CloudControllerClient interface { ApplySpaceQuota(quotaGUID string, spaceGUID string) (resources.RelationshipList, ccv3.Warnings, error) CheckRoute(domainGUID string, hostname string, path string, port int) (bool, ccv3.Warnings, error) CancelDeployment(deploymentGUID string) (ccv3.Warnings, error) + ContinueDeployment(deploymentGUID string) (ccv3.Warnings, error) CopyPackage(sourcePackageGUID string, targetAppGUID string) (resources.Package, ccv3.Warnings, error) 
CreateApplication(app resources.Application) (resources.Application, ccv3.Warnings, error) - CreateApplicationDeployment(appGUID string, dropletGUID string) (string, ccv3.Warnings, error) - CreateApplicationDeploymentByRevision(appGUID string, revisionGUID string) (string, ccv3.Warnings, error) + CreateApplicationDeployment(dep resources.Deployment) (string, ccv3.Warnings, error) CreateApplicationProcessScale(appGUID string, process resources.Process) (resources.Process, ccv3.Warnings, error) CreateApplicationTask(appGUID string, task resources.Task) (resources.Task, ccv3.Warnings, error) CreateBuild(build resources.Build) (resources.Build, ccv3.Warnings, error) @@ -84,6 +84,7 @@ type CloudControllerClient interface { GetDroplet(guid string) (resources.Droplet, ccv3.Warnings, error) GetDroplets(query ...ccv3.Query) ([]resources.Droplet, ccv3.Warnings, error) GetEnvironmentVariableGroup(group constant.EnvironmentVariableGroupName) (resources.EnvironmentVariables, ccv3.Warnings, error) + GetEnvironmentVariablesByURL(url string) (resources.EnvironmentVariables, ccv3.Warnings, error) GetEvents(query ...ccv3.Query) ([]ccv3.Event, ccv3.Warnings, error) GetFeatureFlag(featureFlagName string) (resources.FeatureFlag, ccv3.Warnings, error) GetFeatureFlags() ([]resources.FeatureFlag, ccv3.Warnings, error) @@ -176,6 +177,7 @@ type CloudControllerClient interface { UpdateOrganizationQuota(orgQuota resources.OrganizationQuota) (resources.OrganizationQuota, ccv3.Warnings, error) UpdateProcess(process resources.Process) (resources.Process, ccv3.Warnings, error) UpdateResourceMetadata(resource string, resourceGUID string, metadata resources.Metadata) (ccv3.JobURL, ccv3.Warnings, error) + UpdateRoute(routeGUID string, options map[string]*string) (resources.Route, ccv3.Warnings, error) UpdateSecurityGroupRunningSpace(securityGroupGUID string, spaceGUIDs []string) (ccv3.Warnings, error) UpdateSecurityGroupStagingSpace(securityGroupGUID string, spaceGUIDs []string) (ccv3.Warnings, 
error) UpdateSecurityGroup(securityGroup resources.SecurityGroup) (resources.SecurityGroup, ccv3.Warnings, error) diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go index d707b577..e21873d8 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go @@ -7,15 +7,8 @@ import ( "code.cloudfoundry.org/cli/resources" ) -func (actor Actor) CreateDeploymentByApplicationAndDroplet(appGUID string, dropletGUID string) (string, Warnings, error) { - deploymentGUID, warnings, err := actor.CloudControllerClient.CreateApplicationDeployment(appGUID, dropletGUID) - - return deploymentGUID, Warnings(warnings), err -} - -func (actor Actor) CreateDeploymentByApplicationAndRevision(appGUID string, revisionGUID string) (string, Warnings, error) { - deploymentGUID, warnings, err := actor.CloudControllerClient.CreateApplicationDeploymentByRevision(appGUID, revisionGUID) - +func (actor Actor) CreateDeployment(dep resources.Deployment) (string, Warnings, error) { + deploymentGUID, warnings, err := actor.CloudControllerClient.CreateApplicationDeployment(dep) return deploymentGUID, Warnings(warnings), err } @@ -43,3 +36,8 @@ func (actor Actor) CancelDeployment(deploymentGUID string) (Warnings, error) { warnings, err := actor.CloudControllerClient.CancelDeployment(deploymentGUID) return Warnings(warnings), err } + +func (actor Actor) ContinueDeployment(deploymentGUID string) (Warnings, error) { + warnings, err := actor.CloudControllerClient.ContinueDeployment(deploymentGUID) + return Warnings(warnings), err +} diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/process_readiness_health_check.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/process_readiness_health_check.go new file mode 100644 index 00000000..4dbe8603 --- /dev/null +++ 
b/vendor/code.cloudfoundry.org/cli/actor/v7action/process_readiness_health_check.go @@ -0,0 +1,72 @@ +package v7action + +import ( + "sort" + + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" +) + +type ProcessReadinessHealthCheck struct { + ProcessType string + HealthCheckType constant.HealthCheckType + Endpoint string + InvocationTimeout int64 + Interval int64 +} + +type ProcessReadinessHealthChecks []ProcessReadinessHealthCheck + +func (phs ProcessReadinessHealthChecks) Sort() { + sort.Slice(phs, func(i int, j int) bool { + var iScore int + var jScore int + + switch phs[i].ProcessType { + case constant.ProcessTypeWeb: + iScore = 0 + default: + iScore = 1 + } + + switch phs[j].ProcessType { + case constant.ProcessTypeWeb: + jScore = 0 + default: + jScore = 1 + } + + if iScore == 1 && jScore == 1 { + return phs[i].ProcessType < phs[j].ProcessType + } + return iScore < jScore + }) +} + +func (actor Actor) GetApplicationProcessReadinessHealthChecksByNameAndSpace(appName string, spaceGUID string) ([]ProcessReadinessHealthCheck, Warnings, error) { + app, allWarnings, err := actor.GetApplicationByNameAndSpace(appName, spaceGUID) + if err != nil { + return nil, allWarnings, err + } + + ccv3Processes, warnings, err := actor.CloudControllerClient.GetApplicationProcesses(app.GUID) + allWarnings = append(allWarnings, Warnings(warnings)...) 
+ if err != nil { + return nil, allWarnings, err + } + + var processReadinessHealthChecks ProcessReadinessHealthChecks + for _, ccv3Process := range ccv3Processes { + processReadinessHealthCheck := ProcessReadinessHealthCheck{ + ProcessType: ccv3Process.Type, + HealthCheckType: ccv3Process.ReadinessHealthCheckType, + Endpoint: ccv3Process.ReadinessHealthCheckEndpoint, + InvocationTimeout: ccv3Process.ReadinessHealthCheckInvocationTimeout, + Interval: ccv3Process.ReadinessHealthCheckInterval, + } + processReadinessHealthChecks = append(processReadinessHealthChecks, processReadinessHealthCheck) + } + + processReadinessHealthChecks.Sort() + + return processReadinessHealthChecks, allWarnings, nil +} diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/revisions.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/revisions.go index c2109c8f..f159ab64 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/revisions.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/revisions.go @@ -76,6 +76,20 @@ func (actor Actor) GetRevisionByApplicationAndVersion(appGUID string, revisionVe return revisions[0], Warnings(warnings), nil } +func (actor Actor) GetEnvironmentVariableGroupByRevision(revision resources.Revision) (EnvironmentVariableGroup, bool, Warnings, error) { + envVarApiLink, isPresent := revision.Links["environment_variables"] + if !isPresent { + return EnvironmentVariableGroup{}, isPresent, Warnings{"Unable to retrieve environment variables for revision."}, nil + } + + environmentVariables, warnings, err := actor.CloudControllerClient.GetEnvironmentVariablesByURL(envVarApiLink.HREF) + if err != nil { + return EnvironmentVariableGroup{}, false, Warnings(warnings), err + } + + return EnvironmentVariableGroup(environmentVariables), true, Warnings(warnings), nil +} + func (actor Actor) setRevisionsDeployableByDropletStateForApp(appGUID string, revisions []resources.Revision) ([]resources.Revision, Warnings, error) { droplets, warnings, err := 
actor.CloudControllerClient.GetDroplets( ccv3.Query{Key: ccv3.AppGUIDFilter, Values: []string{appGUID}}, diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/route.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/route.go index 7bccf6bd..2f2c2f70 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/route.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/route.go @@ -26,7 +26,7 @@ type RouteSummary struct { ServiceInstanceName string } -func (actor Actor) CreateRoute(spaceGUID, domainName, hostname, path string, port int) (resources.Route, Warnings, error) { +func (actor Actor) CreateRoute(spaceGUID, domainName, hostname, path string, port int, options map[string]*string) (resources.Route, Warnings, error) { allWarnings := Warnings{} domain, warnings, err := actor.GetDomainByName(domainName) allWarnings = append(allWarnings, warnings...) @@ -41,6 +41,7 @@ func (actor Actor) CreateRoute(spaceGUID, domainName, hostname, path string, por Host: hostname, Path: path, Port: port, + Options: options, }) actorWarnings := Warnings(apiWarnings) @@ -401,6 +402,11 @@ func (actor Actor) MapRoute(routeGUID string, appGUID string, destinationProtoco return Warnings(warnings), err } +func (actor Actor) UpdateRoute(routeGUID string, options map[string]*string) (resources.Route, Warnings, error) { + route, warnings, err := actor.CloudControllerClient.UpdateRoute(routeGUID, options) + return route, Warnings(warnings), err +} + func (actor Actor) UpdateDestination(routeGUID string, destinationGUID string, protocol string) (Warnings, error) { warnings, err := actor.CloudControllerClient.UpdateDestination(routeGUID, destinationGUID, protocol) return Warnings(warnings), err diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go index ffb7ef5d..94cddc0b 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go +++ 
b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go @@ -7,7 +7,7 @@ // may include features and endpoints of later API versions. // // For more information on the Cloud Controller API see -// https://apidocs.cloudfoundry.org/ +// https://v2-apidocs.cloudfoundry.org/ // // # Method Naming Conventions // diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/application.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/application.go index b0acd644..dca0b70a 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/application.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/application.go @@ -9,6 +9,9 @@ const ( // AppLifecycleTypeDocker will pull a docker image from a registry to run an // app. AppLifecycleTypeDocker AppLifecycleType = "docker" + // AppLifecycleTypeCNB will use a droplet (created with cloud native buildpacks) + // and a rootfs to run the app. + AppLifecycleTypeCNB AppLifecycleType = "cnb" ) // ApplicationAction represents the action being taken on an application diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment.go index d324cb9a..2ae6159e 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment.go @@ -5,51 +5,29 @@ package constant type DeploymentState string const ( - // DeploymentDeploying means the deployment is in state 'DEPLOYING' DeploymentDeploying DeploymentState = "DEPLOYING" - - // DeploymentCanceled means the deployment is in state 'CANCELED' - DeploymentCanceled DeploymentState = "CANCELED" - - // DeploymentDeployed means the deployment is in state 'DEPLOYED' - DeploymentDeployed DeploymentState = "DEPLOYED" - - // DeploymentCanceled means the deployment is in state 'CANCELING' + 
DeploymentCanceled DeploymentState = "CANCELED" + DeploymentDeployed DeploymentState = "DEPLOYED" DeploymentCanceling DeploymentState = "CANCELING" - - // DeploymentFailing means the deployment is in state 'FAILING' - DeploymentFailing DeploymentState = "FAILING" - - // DeploymentFailed means the deployment is in state 'FAILED' - DeploymentFailed DeploymentState = "FAILED" + DeploymentFailing DeploymentState = "FAILING" + DeploymentFailed DeploymentState = "FAILED" ) // DeploymentStatusReason describes the status reasons a deployment can have type DeploymentStatusReason string const ( - // DeploymentStatusReasonDeployed means the deployment's status.value is - // 'DEPLOYED' - DeploymentStatusReasonDeployed DeploymentStatusReason = "DEPLOYED" - - // DeploymentStatusReasonCanceled means the deployment's status.value is - // 'CANCELED' - DeploymentStatusReasonCanceled DeploymentStatusReason = "CANCELED" - - // DeploymentStatusReasonSuperseded means the deployment's status.value is - // 'SUPERSEDED' + DeploymentStatusReasonDeploying DeploymentStatusReason = "DEPLOYING" + DeploymentStatusReasonCanceling DeploymentStatusReason = "CANCELING" + DeploymentStatusReasonDeployed DeploymentStatusReason = "DEPLOYED" + DeploymentStatusReasonCanceled DeploymentStatusReason = "CANCELED" DeploymentStatusReasonSuperseded DeploymentStatusReason = "SUPERSEDED" + DeploymentStatusReasonPaused DeploymentStatusReason = "PAUSED" ) -// DeploymentStatusValue describes the status values a deployment can have type DeploymentStatusValue string const ( - // DeploymentStatusValueActive means the deployment's status.value is - // 'ACTIVE' - DeploymentStatusValueActive DeploymentStatusValue = "ACTIVE" - - // DeploymentStatusValueFinalized means the deployment's status.value is - // 'FINALIZED' + DeploymentStatusValueActive DeploymentStatusValue = "ACTIVE" DeploymentStatusValueFinalized DeploymentStatusValue = "FINALIZED" ) diff --git 
a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment_strategy.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment_strategy.go index 2e607f65..de513bc6 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment_strategy.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/deployment_strategy.go @@ -9,4 +9,7 @@ const ( // Rolling means a new web process will be created for the app and instances will roll from the old one to the new one. DeploymentStrategyRolling DeploymentStrategy = "rolling" + + // Canary means after a web process is created for the app the deployment will pause for evaluation until it is continued or canceled. + DeploymentStrategyCanary DeploymentStrategy = "canary" ) diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/relationships.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/relationships.go index 6a8b3c12..06e4eb39 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/relationships.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant/relationships.go @@ -21,4 +21,7 @@ const ( // RelationshipTypeQuota is a relationship with a Cloud Controller quota (org quota or space quota). RelationshipTypeQuota RelationshipType = "quota" + + // RelationshipTypeCurrentDroplet is a relationship with a Droplet. 
+ RelationshipTypeCurrentDroplet RelationshipType = "current_droplet" ) diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/deployment.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/deployment.go index a4dc7da0..3d72e47f 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/deployment.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/deployment.go @@ -1,42 +1,29 @@ package ccv3 import ( - "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal" "code.cloudfoundry.org/cli/resources" ) -func (client *Client) CancelDeployment(deploymentGUID string) (Warnings, error) { +func (client *Client) ContinueDeployment(deploymentGUID string) (Warnings, error) { _, warnings, err := client.MakeRequest(RequestParams{ - RequestName: internal.PostApplicationDeploymentActionCancelRequest, + RequestName: internal.PostApplicationDeploymentActionContinueRequest, URIParams: internal.Params{"deployment_guid": deploymentGUID}, }) return warnings, err } -func (client *Client) CreateApplicationDeployment(appGUID string, dropletGUID string) (string, Warnings, error) { - dep := resources.Deployment{ - DropletGUID: dropletGUID, - Relationships: resources.Relationships{constant.RelationshipTypeApplication: resources.Relationship{GUID: appGUID}}, - } - - var responseBody resources.Deployment - +func (client *Client) CancelDeployment(deploymentGUID string) (Warnings, error) { _, warnings, err := client.MakeRequest(RequestParams{ - RequestName: internal.PostApplicationDeploymentRequest, - RequestBody: dep, - ResponseBody: &responseBody, + RequestName: internal.PostApplicationDeploymentActionCancelRequest, + URIParams: internal.Params{"deployment_guid": deploymentGUID}, }) - return responseBody.GUID, warnings, err + return warnings, err } -func (client *Client) CreateApplicationDeploymentByRevision(appGUID string, revisionGUID string) (string, Warnings, error) { - 
dep := resources.Deployment{ - RevisionGUID: revisionGUID, - Relationships: resources.Relationships{constant.RelationshipTypeApplication: resources.Relationship{GUID: appGUID}}, - } +func (client *Client) CreateApplicationDeployment(dep resources.Deployment) (string, Warnings, error) { var responseBody resources.Deployment diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/included_resources.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/included_resources.go index 00447076..24f3b5a0 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/included_resources.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/included_resources.go @@ -12,3 +12,14 @@ type IncludedResources struct { ServicePlans []resources.ServicePlan `json:"service_plans,omitempty"` Apps []resources.Application `json:"apps,omitempty"` } + +func (i *IncludedResources) Merge(resources IncludedResources) { + i.Apps = append(i.Apps, resources.Apps...) + i.Users = append(i.Users, resources.Users...) + i.Organizations = append(i.Organizations, resources.Organizations...) + i.Spaces = append(i.Spaces, resources.Spaces...) + i.ServiceBrokers = append(i.ServiceBrokers, resources.ServiceBrokers...) + i.ServiceInstances = append(i.ServiceInstances, resources.ServiceInstances...) + i.ServiceOfferings = append(i.ServiceOfferings, resources.ServiceOfferings...) + i.ServicePlans = append(i.ServicePlans, resources.ServicePlans...) 
+} diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go index c095b2df..fbaf2a9c 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go @@ -138,6 +138,7 @@ const ( PostApplicationActionStartRequest = "PostApplicationActionStart" PostApplicationActionStopRequest = "PostApplicationActionStop" PostApplicationDeploymentActionCancelRequest = "PostApplicationDeploymentActionCancel" + PostApplicationDeploymentActionContinueRequest = "PostApplicationDeploymentActionContinue" PostApplicationDeploymentRequest = "PostApplicationDeployment" PostApplicationProcessActionScaleRequest = "PostApplicationProcessActionScale" PostApplicationRequest = "PostApplication" @@ -178,6 +179,7 @@ const ( ShareRouteRequest = "ShareRouteRequest" UnmapRouteRequest = "UnmapRoute" UnshareRouteRequest = "UnshareRoute" + UpdateRouteRequest = "UpdateRoute" WhoAmI = "WhoAmI" ) @@ -219,6 +221,7 @@ var APIRoutes = map[string]Route{ PostApplicationDeploymentRequest: {Path: "/v3/deployments", Method: http.MethodPost}, GetDeploymentRequest: {Path: "/v3/deployments/:deployment_guid", Method: http.MethodGet}, PostApplicationDeploymentActionCancelRequest: {Path: "/v3/deployments/:deployment_guid/actions/cancel", Method: http.MethodPost}, + PostApplicationDeploymentActionContinueRequest: {Path: "/v3/deployments/:deployment_guid/actions/continue", Method: http.MethodPost}, GetDomainsRequest: {Path: "/v3/domains", Method: http.MethodGet}, PostDomainRequest: {Path: "/v3/domains", Method: http.MethodPost}, DeleteDomainRequest: {Path: "/v3/domains/:domain_guid", Method: http.MethodDelete}, @@ -280,6 +283,7 @@ var APIRoutes = map[string]Route{ PatchRouteRequest: {Path: "/v3/routes/:route_guid", Method: http.MethodPatch}, GetRouteDestinationsRequest: {Path: 
"/v3/routes/:route_guid/destinations", Method: http.MethodGet}, MapRouteRequest: {Path: "/v3/routes/:route_guid/destinations", Method: http.MethodPost}, + UpdateRouteRequest: {Path: "/v3/routes/:route_guid", Method: http.MethodPatch}, UnmapRouteRequest: {Path: "/v3/routes/:route_guid/destinations/:destination_guid", Method: http.MethodDelete}, PatchDestinationRequest: {Path: "/v3/routes/:route_guid/destinations/:destination_guid", Method: http.MethodPatch}, ShareRouteRequest: {Path: "/v3/routes/:route_guid/relationships/shared_spaces", Method: http.MethodPost}, diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/paginate.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/paginate.go index bf87cce3..a31fe0ec 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/paginate.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/paginate.go @@ -17,14 +17,7 @@ func (requester RealRequester) paginate(request *cloudcontroller.Request, obj in return IncludedResources{}, fullWarningsList, err } - includes.Apps = append(includes.Apps, wrapper.IncludedResources.Apps...) - includes.Users = append(includes.Users, wrapper.IncludedResources.Users...) - includes.Organizations = append(includes.Organizations, wrapper.IncludedResources.Organizations...) - includes.Spaces = append(includes.Spaces, wrapper.IncludedResources.Spaces...) - includes.ServiceBrokers = append(includes.ServiceBrokers, wrapper.IncludedResources.ServiceBrokers...) - includes.ServiceInstances = append(includes.ServiceInstances, wrapper.IncludedResources.ServiceInstances...) - includes.ServiceOfferings = append(includes.ServiceOfferings, wrapper.IncludedResources.ServiceOfferings...) - includes.ServicePlans = append(includes.ServicePlans, wrapper.IncludedResources.ServicePlans...) 
+ includes.Merge(wrapper.IncludedResources) if specificPage || wrapper.NextPage() == "" { break diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/request.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/request.go index dc799fc5..65b988ac 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/request.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/request.go @@ -3,6 +3,7 @@ package ccv3 import ( "io" "net/http" + "strings" "code.cloudfoundry.org/cli/api/cloudcontroller" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal" @@ -57,6 +58,7 @@ func (requester *RealRequester) newHTTPRequest(passedRequest requestOptions) (*c } request.Header = http.Header{} + if passedRequest.Header != nil { request.Header = passedRequest.Header } @@ -69,9 +71,15 @@ func (requester *RealRequester) newHTTPRequest(passedRequest requestOptions) (*c request.Header.Set("Accept", "application/json") } - if request.Header.Get("Content-Type") == "" { + if !isDownloadDroplet(passedRequest.URL, passedRequest.RequestName) && request.Header.Get("Content-Type") == "" { request.Header.Set("Content-Type", "application/json") + } else if isDownloadDroplet(passedRequest.URL, passedRequest.RequestName) && request.Header.Get("Content-Type") != "" { + request.Header.Del("Content-Type") } return cloudcontroller.NewRequest(request, passedRequest.Body), nil } + +func isDownloadDroplet(URL string, requestName string) bool { + return (strings.Contains(URL, "droplet") && strings.Contains(URL, "download")) || (requestName == internal.GetDropletBitsRequest) +} diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/revisions.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/revisions.go index 67b14a0f..d50fca26 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/revisions.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/revisions.go @@ -35,3 +35,14 @@ func (client *Client) 
GetApplicationRevisionsDeployed(appGUID string) ([]resourc }) return revisions, warnings, err } + +func (client *Client) GetEnvironmentVariablesByURL(url string) (resources.EnvironmentVariables, Warnings, error) { + environmentVariables := make(resources.EnvironmentVariables) + + _, warnings, err := client.MakeRequest(RequestParams{ + URL: url, + ResponseBody: &environmentVariables, + }) + + return environmentVariables, warnings, err +} diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/route.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/route.go index afff0af5..399bee2f 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/route.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/route.go @@ -82,6 +82,24 @@ func (client Client) GetRoutes(query ...Query) ([]resources.Route, Warnings, err return routes, warnings, err } +func (client Client) UpdateRoute(routeGUID string, options map[string]*string) (resources.Route, Warnings, error) { + var responseBody resources.Route + var route = resources.Route{} + var uriParams = internal.Params{"route_guid": routeGUID} + + route.Options = options + + _, warnings, err := client.MakeRequest(RequestParams{ + RequestName: internal.UpdateRouteRequest, + URIParams: uriParams, + RequestBody: route, + ResponseBody: &responseBody, + }) + + return responseBody, warnings, err + +} + func (client Client) MapRoute(routeGUID string, appGUID string, destinationProtocol string) (Warnings, error) { type destinationProcess struct { ProcessType string `json:"process_type"` diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/task.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/task.go index 359ee927..91c99814 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/task.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/task.go @@ -25,6 +25,16 @@ func (client *Client) CreateApplicationTask(appGUID string, task 
resources.Task) func (client *Client) GetApplicationTasks(appGUID string, query ...Query) ([]resources.Task, Warnings, error) { var tasks []resources.Task + foundPerPageQuery := false + for _, keyVal := range query { + if keyVal.Key == PerPage { + foundPerPageQuery = true + } + } + if !foundPerPageQuery { + query = append(query, Query{Key: PerPage, Values: []string{MaxPerPage}}) + } + _, warnings, err := client.MakeListRequest(RequestParams{ RequestName: internal.GetApplicationTasksRequest, URIParams: internal.Params{"app_guid": appGUID}, diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go index 5158ab18..fcee5cdb 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go @@ -15,4 +15,7 @@ const ( MinVersionSpaceSupporterV3 = "3.104.0" MinVersionLogRateLimitingV3 = "3.124.0" // TODO: update this when we have a CAPI release + + MinVersionCNB = "3.168.0" + MinVersionPerRouteOpts = "3.183.0" ) diff --git a/vendor/code.cloudfoundry.org/cli/api/uaa/error_converter.go b/vendor/code.cloudfoundry.org/cli/api/uaa/error_converter.go index ebbe950c..c5ecb299 100644 --- a/vendor/code.cloudfoundry.org/cli/api/uaa/error_converter.go +++ b/vendor/code.cloudfoundry.org/cli/api/uaa/error_converter.go @@ -53,7 +53,7 @@ func convert(rawHTTPStatusErr RawHTTPStatusError) error { if uaaErrorResponse.Type == "invalid_token" { return InvalidAuthTokenError{Message: uaaErrorResponse.Description} } - if uaaErrorResponse.Type == "unauthorized" { + if uaaErrorResponse.Type == "unauthorized" || uaaErrorResponse.Type == "invalid_client" { if uaaErrorResponse.Description == "Your account has been locked because of too many failed attempts to login." 
{ return AccountLockedError{Message: "Your account has been locked because of too many failed attempts to login."} } diff --git a/vendor/code.cloudfoundry.org/cli/command/config.go b/vendor/code.cloudfoundry.org/cli/command/config.go index 44b30e5b..676223c7 100644 --- a/vendor/code.cloudfoundry.org/cli/command/config.go +++ b/vendor/code.cloudfoundry.org/cli/command/config.go @@ -24,6 +24,7 @@ type Config interface { CurrentUserName() (string, error) DialTimeout() time.Duration DockerPassword() string + CNBCredentials() (map[string]interface{}, error) Experimental() bool GetPlugin(pluginName string) (configv3.Plugin, bool) GetPluginCaseInsensitive(pluginName string) (configv3.Plugin, bool) diff --git a/vendor/code.cloudfoundry.org/cli/command/translatableerror/convert_to_translatable_error.go b/vendor/code.cloudfoundry.org/cli/command/translatableerror/convert_to_translatable_error.go index 4bcacac1..f8636a7c 100644 --- a/vendor/code.cloudfoundry.org/cli/command/translatableerror/convert_to_translatable_error.go +++ b/vendor/code.cloudfoundry.org/cli/command/translatableerror/convert_to_translatable_error.go @@ -188,6 +188,13 @@ func ConvertToTranslatableError(err error) error { return RunTaskError{Message: "App is not staged."} } + if strings.Contains(e.Message, "Unknown field(s): 'options'") { + return MinimumCFAPIVersionNotMetError{ + Command: "'--max-in-flight' flag", + MinimumVersion: "3.173.0", + } + } + // JSON Errors case *json.SyntaxError: return JSONSyntaxError{Err: e} diff --git a/vendor/code.cloudfoundry.org/cli/command/ui.go b/vendor/code.cloudfoundry.org/cli/command/ui.go index 6c479b83..424c71d2 100644 --- a/vendor/code.cloudfoundry.org/cli/command/ui.go +++ b/vendor/code.cloudfoundry.org/cli/command/ui.go @@ -33,6 +33,7 @@ type UI interface { DisplayPasswordPrompt(template string, templateValues ...map[string]interface{}) (string, error) DisplayTableWithHeader(prefix string, table [][]string, padding int) DisplayText(template string, data 
...map[string]interface{}) + DisplayTextLiteral(text string) DisplayTextMenu(choices []string, promptTemplate string, templateValues ...map[string]interface{}) (string, error) DisplayTextPrompt(template string, templateValues ...map[string]interface{}) (string, error) DisplayTextWithBold(text string, keys ...map[string]interface{}) diff --git a/vendor/code.cloudfoundry.org/cli/resources/application_resource.go b/vendor/code.cloudfoundry.org/cli/resources/application_resource.go index b7b5c782..2212c19a 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/application_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/application_resource.go @@ -25,6 +25,10 @@ type Application struct { SpaceGUID string // State is the desired state of the application. State constant.ApplicationState + // Credentials are used by Cloud Native Buildpacks lifecycle to pull buildpacks + Credentials map[string]interface{} + // CurrentDropletGUID is the unique identifier of the droplet currently attached to the application. 
+ CurrentDropletGUID string } // ApplicationNameOnly represents only the name field of a Cloud Controller V3 Application @@ -40,15 +44,19 @@ func (a Application) MarshalJSON() ([]byte, error) { Metadata: a.Metadata, } + ccApp.Relationships = Relationships{} + if a.SpaceGUID != "" { - ccApp.Relationships = Relationships{ - constant.RelationshipTypeSpace: Relationship{GUID: a.SpaceGUID}, - } + ccApp.Relationships[constant.RelationshipTypeSpace] = Relationship{GUID: a.SpaceGUID} + } + + if a.CurrentDropletGUID != "" { + ccApp.Relationships[constant.RelationshipTypeCurrentDroplet] = Relationship{GUID: a.CurrentDropletGUID} } if a.LifecycleType == constant.AppLifecycleTypeDocker { ccApp.setDockerLifecycle() - } else if a.LifecycleType == constant.AppLifecycleTypeBuildpack { + } else if a.LifecycleType == constant.AppLifecycleTypeBuildpack || a.LifecycleType == constant.AppLifecycleTypeCNB { if len(a.LifecycleBuildpacks) > 0 || a.StackName != "" { if a.hasAutodetectedBuildpack() { ccApp.setAutodetectedBuildpackLifecycle(a) @@ -79,6 +87,9 @@ func (a *Application) UnmarshalJSON(data []byte) error { a.LifecycleType = lifecycle.Type a.Name = ccApp.Name a.SpaceGUID = ccApp.Relationships[constant.RelationshipTypeSpace].GUID + if _, ok := ccApp.Relationships[constant.RelationshipTypeCurrentDroplet]; ok { + a.CurrentDropletGUID = ccApp.Relationships[constant.RelationshipTypeCurrentDroplet].GUID + } a.State = ccApp.State a.Metadata = ccApp.Metadata @@ -100,11 +111,18 @@ func (a Application) hasAutodetectedBuildpack() bool { return a.LifecycleBuildpacks[0] == constant.AutodetectBuildpackValueDefault || a.LifecycleBuildpacks[0] == constant.AutodetectBuildpackValueNull } +type ccCredentials map[string]interface{} + +func (ccCredentials) UnmarshalJSON(data []byte) error { + return nil +} + type ccLifecycle struct { Type constant.AppLifecycleType `json:"type,omitempty"` Data struct { - Buildpacks []string `json:"buildpacks,omitempty"` - Stack string `json:"stack,omitempty"` + 
Buildpacks []string `json:"buildpacks,omitempty"` + Stack string `json:"stack,omitempty"` + Credentials ccCredentials `json:"credentials,omitempty"` } `json:"data"` } @@ -135,6 +153,7 @@ func (ccApp *ccApplication) setBuildpackLifecycle(a Application) { lifecycle.Type = a.LifecycleType lifecycle.Data.Buildpacks = a.LifecycleBuildpacks lifecycle.Data.Stack = a.StackName + lifecycle.Data.Credentials = a.Credentials ccApp.Lifecycle = lifecycle } diff --git a/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go b/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go index 144ac076..81fa6fe9 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go @@ -8,16 +8,36 @@ import ( ) type Deployment struct { - GUID string - State constant.DeploymentState - StatusValue constant.DeploymentStatusValue - StatusReason constant.DeploymentStatusReason - RevisionGUID string - DropletGUID string - CreatedAt string - UpdatedAt string - Relationships Relationships - NewProcesses []Process + GUID string + State constant.DeploymentState + StatusValue constant.DeploymentStatusValue + StatusReason constant.DeploymentStatusReason + LastStatusChange string + Options DeploymentOpts + RevisionGUID string + DropletGUID string + CreatedAt string + UpdatedAt string + Relationships Relationships + NewProcesses []Process + Strategy constant.DeploymentStrategy +} + +type DeploymentOpts struct { + MaxInFlight int `json:"max_in_flight,omitempty"` + CanaryDeploymentOptions *CanaryDeploymentOptions `json:"canary,omitempty"` +} + +func (d DeploymentOpts) IsEmpty() bool { + return d.MaxInFlight == 0 && (d.CanaryDeploymentOptions == nil || len(d.CanaryDeploymentOptions.Steps) == 0) +} + +type CanaryDeploymentOptions struct { + Steps []CanaryStep `json:"steps"` +} + +type CanaryStep struct { + InstanceWeight int64 `json:"instance_weight"` } // MarshalJSON converts a Deployment into a Cloud 
Controller Deployment. @@ -30,9 +50,11 @@ func (d Deployment) MarshalJSON() ([]byte, error) { } var ccDeployment struct { - Droplet *Droplet `json:"droplet,omitempty"` - Revision *Revision `json:"revision,omitempty"` - Relationships Relationships `json:"relationships,omitempty"` + Droplet *Droplet `json:"droplet,omitempty"` + Options *DeploymentOpts `json:"options,omitempty"` + Revision *Revision `json:"revision,omitempty"` + Strategy constant.DeploymentStrategy `json:"strategy,omitempty"` + Relationships Relationships `json:"relationships,omitempty"` } if d.DropletGUID != "" { @@ -43,6 +65,14 @@ func (d Deployment) MarshalJSON() ([]byte, error) { ccDeployment.Revision = &Revision{d.RevisionGUID} } + if d.Strategy != "" { + ccDeployment.Strategy = d.Strategy + } + + if !d.Options.IsEmpty() { + ccDeployment.Options = &d.Options + } + ccDeployment.Relationships = d.Relationships return json.Marshal(ccDeployment) @@ -56,12 +86,18 @@ func (d *Deployment) UnmarshalJSON(data []byte) error { Relationships Relationships `json:"relationships,omitempty"` State constant.DeploymentState `json:"state,omitempty"` Status struct { + Details struct { + LastStatusChange string `json:"last_status_change"` + } Value constant.DeploymentStatusValue `json:"value"` Reason constant.DeploymentStatusReason `json:"reason"` } `json:"status"` - Droplet Droplet `json:"droplet,omitempty"` - NewProcesses []Process `json:"new_processes,omitempty"` + Droplet Droplet `json:"droplet,omitempty"` + NewProcesses []Process `json:"new_processes,omitempty"` + Strategy constant.DeploymentStrategy `json:"strategy"` + Options DeploymentOpts `json:"options,omitempty"` } + err := cloudcontroller.DecodeJSON(data, &ccDeployment) if err != nil { return err @@ -73,8 +109,11 @@ func (d *Deployment) UnmarshalJSON(data []byte) error { d.State = ccDeployment.State d.StatusValue = ccDeployment.Status.Value d.StatusReason = ccDeployment.Status.Reason + d.LastStatusChange = ccDeployment.Status.Details.LastStatusChange 
d.DropletGUID = ccDeployment.Droplet.GUID d.NewProcesses = ccDeployment.NewProcesses + d.Strategy = ccDeployment.Strategy + d.Options = ccDeployment.Options return nil } diff --git a/vendor/code.cloudfoundry.org/cli/resources/droplet_resource.go b/vendor/code.cloudfoundry.org/cli/resources/droplet_resource.go index 27dc2574..acd9f1f4 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/droplet_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/droplet_resource.go @@ -1,12 +1,17 @@ package resources import ( + "encoding/json" + + "code.cloudfoundry.org/cli/api/cloudcontroller" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" ) // Droplet represents a Cloud Controller droplet's metadata. A droplet is a set of // compiled bits for a given application. type Droplet struct { + // AppGUID is the unique identifier of the application associated with the droplet. + AppGUID string `json:"app_guid"` //Buildpacks are the detected buildpacks from the staging process. Buildpacks []DropletBuildpack `json:"buildpacks,omitempty"` // CreatedAt is the timestamp that the Cloud Controller created the droplet. @@ -35,3 +40,87 @@ type DropletBuildpack struct { // Version is the version of the detected buildpack. 
Version string `json:"version"` } + +func (d Droplet) MarshallJSON() ([]byte, error) { + type ccDroplet struct { + GUID string `json:"guid,omitempty"` + Buildpacks []DropletBuildpack `json:"buildpacks,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + Image string `json:"image,omitempty"` + Stack string `json:"stack,omitempty"` + State constant.DropletState `json:"state,omitempty"` + Relationships *struct { + App struct { + Data struct { + GUID string `json:"guid,omitempty"` + } `json:"data,omitempty"` + } `json:"app,omitempty"` + } `json:"relationships,omitempty"` + } + + ccD := ccDroplet{ + GUID: d.GUID, + Buildpacks: d.Buildpacks, + CreatedAt: d.CreatedAt, + Image: d.Image, + Stack: d.Stack, + State: d.State, + } + + if d.AppGUID != "" { + ccD.Relationships = &struct { + App struct { + Data struct { + GUID string `json:"guid,omitempty"` + } `json:"data,omitempty"` + } `json:"app,omitempty"` + }{ + App: struct { + Data struct { + GUID string `json:"guid,omitempty"` + } `json:"data,omitempty"` + }{ + Data: struct { + GUID string `json:"guid,omitempty"` + }{ + GUID: d.AppGUID, + }, + }, + } + } + + return json.Marshal(ccD) +} + +func (d *Droplet) UnmarshalJSON(data []byte) error { + var alias struct { + GUID string `json:"guid,omitempty"` + Buildpacks []DropletBuildpack `json:"buildpacks,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + Image string `json:"image,omitempty"` + Stack string `json:"stack,omitempty"` + State constant.DropletState `json:"state,omitempty"` + Relationships struct { + App struct { + Data struct { + GUID string `json:"guid,omitempty"` + } `json:"data,omitempty"` + } `json:"app,omitempty"` + } + } + + err := cloudcontroller.DecodeJSON(data, &alias) + if err != nil { + return err + } + + d.GUID = alias.GUID + d.Buildpacks = alias.Buildpacks + d.CreatedAt = alias.CreatedAt + d.Image = alias.Image + d.Stack = alias.Stack + d.State = alias.State + d.AppGUID = alias.Relationships.App.Data.GUID + + return nil +} diff --git 
a/vendor/code.cloudfoundry.org/cli/resources/metadata_resource.go b/vendor/code.cloudfoundry.org/cli/resources/metadata_resource.go index 414fe142..afa1c180 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/metadata_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/metadata_resource.go @@ -3,7 +3,8 @@ package resources import "code.cloudfoundry.org/cli/types" type Metadata struct { - Labels map[string]types.NullString `json:"labels,omitempty"` + Annotations map[string]types.NullString `json:"annotations,omitempty"` + Labels map[string]types.NullString `json:"labels,omitempty"` } type ResourceMetadata struct { diff --git a/vendor/code.cloudfoundry.org/cli/resources/options_resource.go b/vendor/code.cloudfoundry.org/cli/resources/options_resource.go new file mode 100644 index 00000000..1730b9d4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/resources/options_resource.go @@ -0,0 +1,24 @@ +package resources + +import "strings" + +func CreateRouteOptions(options []string) (map[string]*string, *string) { + routeOptions := map[string]*string{} + for _, option := range options { + key, value, found := strings.Cut(option, "=") + if found { + routeOptions[key] = &value + } else { + return routeOptions, &option + } + } + return routeOptions, nil +} + +func RemoveRouteOptions(options []string) map[string]*string { + routeOptions := map[string]*string{} + for _, option := range options { + routeOptions[option] = nil + } + return routeOptions +} diff --git a/vendor/code.cloudfoundry.org/cli/resources/process_resource.go b/vendor/code.cloudfoundry.org/cli/resources/process_resource.go index afbf20a0..8eb59169 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/process_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/process_resource.go @@ -13,16 +13,20 @@ type Process struct { GUID string Type string // Command is the process start command. Note: This value will be obfuscated when obtained from listing. 
- Command types.FilteredString - HealthCheckType constant.HealthCheckType - HealthCheckEndpoint string - HealthCheckInvocationTimeout int64 - HealthCheckTimeout int64 - Instances types.NullInt - MemoryInMB types.NullUint64 - DiskInMB types.NullUint64 - LogRateLimitInBPS types.NullInt - AppGUID string + Command types.FilteredString + HealthCheckType constant.HealthCheckType + HealthCheckEndpoint string + HealthCheckInvocationTimeout int64 + HealthCheckTimeout int64 + ReadinessHealthCheckType constant.HealthCheckType + ReadinessHealthCheckEndpoint string + ReadinessHealthCheckInvocationTimeout int64 + ReadinessHealthCheckInterval int64 + Instances types.NullInt + MemoryInMB types.NullUint64 + DiskInMB types.NullUint64 + LogRateLimitInBPS types.NullInt + AppGUID string } func (p Process) MarshalJSON() ([]byte, error) { @@ -34,6 +38,7 @@ func (p Process) MarshalJSON() ([]byte, error) { marshalDisk(p, &ccProcess) marshalLogRateLimit(p, &ccProcess) marshalHealthCheck(p, &ccProcess) + marshalReadinessHealthCheck(p, &ccProcess) return json.Marshal(ccProcess) } @@ -57,6 +62,15 @@ func (p *Process) UnmarshalJSON(data []byte) error { Timeout int64 `json:"timeout"` } `json:"data"` } `json:"health_check"` + + ReadinessHealthCheck struct { + Type constant.HealthCheckType `json:"type"` + Data struct { + Endpoint string `json:"endpoint"` + InvocationTimeout int64 `json:"invocation_timeout"` + Interval int64 `json:"interval"` + } `json:"data"` + } `json:"readiness_health_check"` } err := cloudcontroller.DecodeJSON(data, &ccProcess) @@ -71,6 +85,10 @@ func (p *Process) UnmarshalJSON(data []byte) error { p.HealthCheckInvocationTimeout = ccProcess.HealthCheck.Data.InvocationTimeout p.HealthCheckTimeout = ccProcess.HealthCheck.Data.Timeout p.HealthCheckType = ccProcess.HealthCheck.Type + p.ReadinessHealthCheckEndpoint = ccProcess.ReadinessHealthCheck.Data.Endpoint + p.ReadinessHealthCheckType = ccProcess.ReadinessHealthCheck.Type + p.ReadinessHealthCheckInvocationTimeout = 
ccProcess.ReadinessHealthCheck.Data.InvocationTimeout + p.ReadinessHealthCheckInterval = ccProcess.ReadinessHealthCheck.Data.Interval p.Instances = ccProcess.Instances p.MemoryInMB = ccProcess.MemoryInMB p.LogRateLimitInBPS = ccProcess.LogRateLimitInBPS @@ -89,6 +107,15 @@ type healthCheck struct { } `json:"data"` } +type readinessHealthCheck struct { + Type constant.HealthCheckType `json:"type,omitempty"` + Data struct { + Endpoint interface{} `json:"endpoint,omitempty"` + InvocationTimeout int64 `json:"invocation_timeout,omitempty"` + Interval int64 `json:"interval,omitempty"` + } `json:"data"` +} + type marshalProcess struct { Command interface{} `json:"command,omitempty"` Instances json.Number `json:"instances,omitempty"` @@ -96,7 +123,8 @@ type marshalProcess struct { DiskInMB json.Number `json:"disk_in_mb,omitempty"` LogRateLimitInBPS json.Number `json:"log_rate_limit_in_bytes_per_second,omitempty"` - HealthCheck *healthCheck `json:"health_check,omitempty"` + HealthCheck *healthCheck `json:"health_check,omitempty"` + ReadinessHealthCheck *readinessHealthCheck `json:"readiness_health_check,omitempty"` } func marshalCommand(p Process, ccProcess *marshalProcess) { @@ -123,6 +151,18 @@ func marshalHealthCheck(p Process, ccProcess *marshalProcess) { } } +func marshalReadinessHealthCheck(p Process, ccProcess *marshalProcess) { + if p.ReadinessHealthCheckType != "" || p.ReadinessHealthCheckEndpoint != "" || p.ReadinessHealthCheckInvocationTimeout != 0 { + ccProcess.ReadinessHealthCheck = new(readinessHealthCheck) + ccProcess.ReadinessHealthCheck.Type = p.ReadinessHealthCheckType + ccProcess.ReadinessHealthCheck.Data.InvocationTimeout = p.ReadinessHealthCheckInvocationTimeout + ccProcess.ReadinessHealthCheck.Data.Interval = p.ReadinessHealthCheckInterval + if p.ReadinessHealthCheckEndpoint != "" { + ccProcess.ReadinessHealthCheck.Data.Endpoint = p.ReadinessHealthCheckEndpoint + } + } +} + func marshalInstances(p Process, ccProcess *marshalProcess) { if 
p.Instances.IsSet { ccProcess.Instances = json.Number(fmt.Sprint(p.Instances.Value)) diff --git a/vendor/code.cloudfoundry.org/cli/resources/revision_resource.go b/vendor/code.cloudfoundry.org/cli/resources/revision_resource.go index 39d577a2..e187c951 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/revision_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/revision_resource.go @@ -1,11 +1,13 @@ package resources type Revision struct { - GUID string `json:"guid"` - Version int `json:"version"` - Deployable bool `json:"deployable"` - Description string `json:"description"` - Droplet Droplet `json:"droplet"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` + GUID string `json:"guid"` + Version int `json:"version"` + Deployable bool `json:"deployable"` + Description string `json:"description"` + Droplet Droplet `json:"droplet"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Links APILinks `json:"links"` + Metadata *Metadata `json:"metadata,omitempty"` } diff --git a/vendor/code.cloudfoundry.org/cli/resources/route_resource.go b/vendor/code.cloudfoundry.org/cli/resources/route_resource.go index 5bf31b49..6ec9efcf 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/route_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/route_resource.go @@ -2,6 +2,7 @@ package resources import ( "encoding/json" + "strings" "code.cloudfoundry.org/cli/api/cloudcontroller" ) @@ -31,6 +32,7 @@ type Route struct { URL string Destinations []RouteDestination Metadata *Metadata + Options map[string]*string } func (r Route) MarshalJSON() ([]byte, error) { @@ -49,12 +51,13 @@ func (r Route) MarshalJSON() ([]byte, error) { // Building up the request body in ccRoute type ccRoute struct { - GUID string `json:"guid,omitempty"` - Host string `json:"host,omitempty"` - Path string `json:"path,omitempty"` - Protocol string `json:"protocol,omitempty"` - Port int `json:"port,omitempty"` - Relationships 
*Relationships `json:"relationships,omitempty"` + GUID string `json:"guid,omitempty"` + Host string `json:"host,omitempty"` + Path string `json:"path,omitempty"` + Protocol string `json:"protocol,omitempty"` + Port int `json:"port,omitempty"` + Relationships *Relationships `json:"relationships,omitempty"` + Options map[string]*string `json:"options,omitempty"` } ccR := ccRoute{ @@ -63,6 +66,7 @@ func (r Route) MarshalJSON() ([]byte, error) { Path: r.Path, Protocol: r.Protocol, Port: r.Port, + Options: r.Options, } if r.SpaceGUID != "" { @@ -85,6 +89,7 @@ func (r *Route) UnmarshalJSON(data []byte) error { URL string `json:"url,omitempty"` Destinations []RouteDestination `json:"destinations,omitempty"` Metadata *Metadata `json:"metadata,omitempty"` + Options map[string]*string `json:"options,omitempty"` Relationships struct { Space struct { @@ -115,6 +120,19 @@ func (r *Route) UnmarshalJSON(data []byte) error { r.URL = alias.URL r.Destinations = alias.Destinations r.Metadata = alias.Metadata + r.Options = alias.Options return nil } + +func (r *Route) FormattedOptions() string { + var routeOpts = []string{} + formattedOptions := "" + if r.Options != nil && len(r.Options) > 0 { + for optKey, optVal := range r.Options { + routeOpts = append(routeOpts, optKey+"="+*optVal) + } + formattedOptions = " {" + strings.Join(routeOpts, ", ") + "}" + } + return formattedOptions +} diff --git a/vendor/code.cloudfoundry.org/cli/resources/task_resource.go b/vendor/code.cloudfoundry.org/cli/resources/task_resource.go index 62f1f500..6a0184ff 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/task_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/task_resource.go @@ -32,6 +32,9 @@ type Task struct { // Using a pointer so that it can be set to nil to prevent // json serialization when no template is used Template *TaskTemplate `json:"template,omitempty"` + + // Result contains the task result + Result *TaskResult `json:"result,omitempty"` } type TaskTemplate struct { @@ 
-41,3 +44,7 @@ type TaskTemplate struct { type TaskProcessTemplate struct { Guid string `json:"guid,omitempty"` } + +type TaskResult struct { + FailureReason string `json:"failure_reason,omitempty"` +} diff --git a/vendor/code.cloudfoundry.org/cli/util/clissh/ssh.go b/vendor/code.cloudfoundry.org/cli/util/clissh/ssh.go index a6f32ce7..b4f29212 100644 --- a/vendor/code.cloudfoundry.org/cli/util/clissh/ssh.go +++ b/vendor/code.cloudfoundry.org/cli/util/clissh/ssh.go @@ -5,6 +5,7 @@ import ( "crypto/sha1" "crypto/sha256" "encoding/base64" + "encoding/hex" "errors" "fmt" "io" @@ -28,6 +29,7 @@ const ( md5FingerprintLength = 47 // inclusive of space between bytes hexSha1FingerprintLength = 59 // inclusive of space between bytes base64Sha256FingerprintLength = 43 + sha256FingerprintLength = 64 DefaultKeepAliveInterval = 30 * time.Second ) @@ -331,9 +333,12 @@ func (c *SecureShell) terminalType() string { return term } -func base64Sha256Fingerprint(key ssh.PublicKey) string { +func sha256Fingerprint(key ssh.PublicKey, encode bool) string { sum := sha256.Sum256(key.Marshal()) - return base64.RawStdEncoding.EncodeToString(sum[:]) + if encode { + return base64.RawStdEncoding.EncodeToString(sum[:]) + } + return hex.EncodeToString(sum[:]) } func copyAndClose(wg *sync.WaitGroup, dest io.WriteCloser, src io.Reader) { @@ -364,8 +369,10 @@ func fingerprintCallback(skipHostValidation bool, expectedFingerprint string) ss var fingerprint string switch len(expectedFingerprint) { + case sha256FingerprintLength: + fingerprint = sha256Fingerprint(key, false) case base64Sha256FingerprintLength: - fingerprint = base64Sha256Fingerprint(key) + fingerprint = sha256Fingerprint(key, true) case hexSha1FingerprintLength: fingerprint = hexSha1Fingerprint(key) case md5FingerprintLength: diff --git a/vendor/code.cloudfoundry.org/cli/util/configv3/env.go b/vendor/code.cloudfoundry.org/cli/util/configv3/env.go index efd935e3..27e15504 100644 --- a/vendor/code.cloudfoundry.org/cli/util/configv3/env.go 
+++ b/vendor/code.cloudfoundry.org/cli/util/configv3/env.go @@ -1,6 +1,7 @@ package configv3 import ( + "encoding/json" "strconv" "strings" "time" @@ -20,6 +21,7 @@ type EnvOverride struct { CFTrace string CFUsername string DockerPassword string + CNBCredentials string Experimental string ForceTTY string HTTPSProxy string @@ -61,6 +63,21 @@ func (config *Config) DockerPassword() string { return config.ENV.DockerPassword } +// CNBCredentials retrurns CNB credentials from the environment +func (config *Config) CNBCredentials() (map[string]interface{}, error) { + if config.ENV.CNBCredentials == "" { + return nil, nil + } + + creds := map[string]interface{}{} + + if err := json.Unmarshal([]byte(config.ENV.CNBCredentials), &creds); err != nil { + return nil, err + } + + return creds, nil +} + // Experimental returns whether or not to run experimental CLI commands. This // is based on the following: // 1. The $CF_CLI_EXPERIMENTAL environment variable if set diff --git a/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go b/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go index c57b8451..d0a0c8b5 100644 --- a/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go +++ b/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go @@ -128,6 +128,7 @@ func LoadConfig(flags ...FlagOverride) (*Config, error) { CFTrace: os.Getenv("CF_TRACE"), CFUsername: os.Getenv("CF_USERNAME"), DockerPassword: os.Getenv("CF_DOCKER_PASSWORD"), + CNBCredentials: os.Getenv("CF_CNB_REGISTRY_CREDS"), Experimental: os.Getenv("CF_CLI_EXPERIMENTAL"), ForceTTY: os.Getenv("FORCE_TTY"), HTTPSProxy: os.Getenv("https_proxy"), diff --git a/vendor/code.cloudfoundry.org/cli/util/ui/request_logger_file_writer.go b/vendor/code.cloudfoundry.org/cli/util/ui/request_logger_file_writer.go index a2d7aca1..347c556d 100644 --- a/vendor/code.cloudfoundry.org/cli/util/ui/request_logger_file_writer.go +++ b/vendor/code.cloudfoundry.org/cli/util/ui/request_logger_file_writer.go @@ -29,7 
+29,7 @@ func newRequestLoggerFileWriter(ui *UI, lock *sync.Mutex, filePaths []string) *R func (display *RequestLoggerFileWriter) DisplayBody([]byte) error { for _, logFile := range display.logFiles { - _, err := logFile.WriteString(RedactedValue) + _, err := logFile.WriteString(fmt.Sprintf("%s\n", RedactedValue)) if err != nil { return err } diff --git a/vendor/code.cloudfoundry.org/cli/util/ui/ui.go b/vendor/code.cloudfoundry.org/cli/util/ui/ui.go index c2a634ab..7d51d6b0 100644 --- a/vendor/code.cloudfoundry.org/cli/util/ui/ui.go +++ b/vendor/code.cloudfoundry.org/cli/util/ui/ui.go @@ -248,6 +248,15 @@ func (ui *UI) DisplayText(template string, templateValues ...map[string]interfac fmt.Fprintf(ui.Out, "%s\n", ui.TranslateText(template, templateValues...)) } +// DisplayTextLiteral outputs the text to ui.Out without modification. +// This function should only be used when no translation or templating is required. +func (ui *UI) DisplayTextLiteral(text string) { + ui.terminalLock.Lock() + defer ui.terminalLock.Unlock() + + fmt.Fprintf(ui.Out, "%s\n", text) +} + // DisplayTextWithBold translates the template, bolds the templateValues, // substitutes templateValues into the template, and outputs // the result to ui.Out. Only the first map in templateValues is used. 
diff --git a/vendor/code.cloudfoundry.org/cli/version/version.go b/vendor/code.cloudfoundry.org/cli/version/version.go index 47a77f87..4878c51b 100644 --- a/vendor/code.cloudfoundry.org/cli/version/version.go +++ b/vendor/code.cloudfoundry.org/cli/version/version.go @@ -1,6 +1,10 @@ package version -import "github.com/blang/semver/v4" +import ( + "strings" + + "github.com/blang/semver/v4" +) const DefaultVersion = "0.0.0-unknown-version" @@ -11,6 +15,8 @@ var ( ) func VersionString() string { + // Remove the "v" prefix from the binary in case it is present + binaryVersion = strings.TrimPrefix(binaryVersion, "v") versionString, err := semver.Make(binaryVersion) if err != nil { versionString = semver.MustParse(DefaultVersion) diff --git a/vendor/code.cloudfoundry.org/lager/v3/README.md b/vendor/code.cloudfoundry.org/lager/v3/README.md index 9a4248ad..568ea142 100644 --- a/vendor/code.cloudfoundry.org/lager/v3/README.md +++ b/vendor/code.cloudfoundry.org/lager/v3/README.md @@ -1,102 +1,34 @@ -lager -===== +# lager -**Note**: This repository should be imported as `code.cloudfoundry.org/lager`. +[![Go Report +Card](https://goreportcard.com/badge/code.cloudfoundry.org/lager/v3)](https://goreportcard.com/report/code.cloudfoundry.org/lager/v3) +[![Go +Reference](https://pkg.go.dev/badge/code.cloudfoundry.org/lager.svg)](https://pkg.go.dev/code.cloudfoundry.org/lager/v3) -Lager is a logging library for go. +Lager is a logging library for go -## Usage +> \[!NOTE\] +> +> This repository should be imported as +> `code.cloudfoundry.org/lager/v3`. -Instantiate a logger with the name of your component. +# Docs -```go -import ( - "code.cloudfoundry.org/lager/v3" -) +- [Usage](./docs/usage.md) -logger := lager.NewLogger("my-app") -``` +# Contributing -### Lager and [`log/slog`](https://pkg.go.dev/log/slog) -Lager was written long before Go 1.21 introduced structured logging in the standard library. 
-There are some wrapper functions for interoperability between Lager and `slog`, -which are only available when using Go 1.21 and higher. +See the [Contributing.md](./.github/CONTRIBUTING.md) for more +information on how to contribute. -Lager can be used as an [`slog.Handler`](https://pkg.go.dev/log/slog#Handler) using the `NewHandler()` function: +# Working Group Charter -```go -func codeThatAcceptsSlog(l *slog.Logger) { ... } +This repository is maintained by [App Runtime +Platform](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/app-runtime-platform.md) +under `Diego` area. -lagerLogger := lager.NewLogger("my-lager-logger") - -codeThatAcceptsSlog(slog.New(lager.NewHandler(lagerLogger))) -``` - -An `slog.Logger` can be used as a Lager `Sink` using the `NewSlogSink()` function: -```go -var *slog.Logger l = codeThatReturnsSlog() - -lagerLogger := lager.NewLogger("my-lager-logger") - -lagerLogger.RegisterSink(lager.NewSlogSink(l)) -``` - -### Sinks - -Lager can write logs to a variety of destinations. You can specify the destinations -using Lager sinks: - -To write to an arbitrary `Writer` object: - -```go -logger.RegisterSink(lager.NewWriterSink(myWriter, lager.INFO)) -``` - -### Emitting logs - -Lager supports the usual level-based logging, with an optional argument for arbitrary key-value data. 
- -```go -logger.Info("doing-stuff", lager.Data{ - "informative": true, -}) -``` - -output: -```json -{ "source": "my-app", "message": "doing-stuff", "data": { "informative": true }, "timestamp": 1232345, "log_level": 1 } -``` - -Error messages also take an `Error` object: - -```go -logger.Error("failed-to-do-stuff", errors.New("Something went wrong")) -``` - -output: -```json -{ "source": "my-app", "message": "failed-to-do-stuff", "data": { "error": "Something went wrong" }, "timestamp": 1232345, "log_level": 1 } -``` - -### Sessions - -You can avoid repetition of contextual data using 'Sessions': - -```go - -contextualLogger := logger.Session("my-task", lager.Data{ - "request-id": 5, -}) - -contextualLogger.Info("my-action") -``` - -output: - -```json -{ "source": "my-app", "message": "my-task.my-action", "data": { "request-id": 5 }, "timestamp": 1232345, "log_level": 1 } -``` - -## License - -Lager is [Apache 2.0](https://github.com/cloudfoundry/lager/blob/master/LICENSE) licensed. +> \[!IMPORTANT\] +> +> Content in this file is managed by the [CI task +> `sync-readme`](https://github.com/cloudfoundry/wg-app-platform-runtime-ci/blob/main/shared/tasks/sync-readme/metadata.yml) +> and is generated by CI following a convention. 
diff --git a/vendor/code.cloudfoundry.org/lager/v3/handler.go b/vendor/code.cloudfoundry.org/lager/v3/handler.go index 2cdaf7c3..092799b2 100644 --- a/vendor/code.cloudfoundry.org/lager/v3/handler.go +++ b/vendor/code.cloudfoundry.org/lager/v3/handler.go @@ -145,7 +145,11 @@ func processAttr(attr slog.Attr, target map[string]any) { case attr.Key == "": // skip default: - target[attr.Key] = rv.Any() + if rvAsError, isError := rv.Any().(error); isError { + target[attr.Key] = rvAsError.Error() + } else { + target[attr.Key] = rv.Any() + } } } diff --git a/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go index f4fda22d..30ccf098 100644 --- a/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go +++ b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go @@ -7,9 +7,10 @@ import ( // Value recursively walks through the value provided by `v` and truncates // any strings longer than `maxLength`. // Example: -// type foobar struct{A string; B string} -// truncate.Value(foobar{A:"foo",B:"bar"}, 20) == foobar{A:"foo",B:"bar"} -// truncate.Value(foobar{A:strings.Repeat("a", 25),B:"bar"}, 20) == foobar{A:"aaaaaaaa-(truncated)",B:"bar"} +// +// type foobar struct{A string; B string} +// truncate.Value(foobar{A:"foo",B:"bar"}, 20) == foobar{A:"foo",B:"bar"} +// truncate.Value(foobar{A:strings.Repeat("a", 25),B:"bar"}, 20) == foobar{A:"aaaaaaaa-(truncated)",B:"bar"} func Value(v interface{}, maxLength int) interface{} { rv := reflect.ValueOf(v) tv := truncateValue(rv, maxLength) @@ -158,9 +159,10 @@ const lenTruncated = len(truncated) // If the string is shorter than the string "-(truncated)" and the string // exceeds `maxLength`, the output will not be truncated. 
// Example: -// truncate.String(strings.Repeat("a", 25), 20) == "aaaaaaaa-(truncated)" -// truncate.String("foobar", 20) == "foobar" -// truncate.String("foobar", 5) == "foobar" +// +// truncate.String(strings.Repeat("a", 25), 20) == "aaaaaaaa-(truncated)" +// truncate.String("foobar", 20) == "foobar" +// truncate.String("foobar", 5) == "foobar" func String(s string, maxLength int) string { if maxLength <= 0 || len(s) < lenTruncated || len(s) <= maxLength { return s diff --git a/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go b/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go index 17a30295..37e18d1a 100644 --- a/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go +++ b/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go @@ -13,21 +13,20 @@ type redactingSink struct { // data field. The old behavior of NewRedactingWriterSink (which was removed // in v2) can be obtained using the following code: // -// redactingSink, err := NewRedactingSink( -// NewWriterSink(writer, minLogLevel), -// keyPatterns, -// valuePatterns, -// ) +// redactingSink, err := NewRedactingSink( +// NewWriterSink(writer, minLogLevel), +// keyPatterns, +// valuePatterns, +// ) // -// if err != nil { -// return nil, err -// } -// -// return NewReconfigurableSink( -// redactingSink, -// minLogLevel, -// ), nil +// if err != nil { +// return nil, err +// } // +// return NewReconfigurableSink( +// redactingSink, +// minLogLevel, +// ), nil func NewRedactingSink(sink Sink, keyPatterns []string, valuePatterns []string) (Sink, error) { jsonRedacter, err := NewJSONRedacter(keyPatterns, valuePatterns) if err != nil { diff --git a/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go b/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go index ba261fe7..79e4d5b4 100644 --- a/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go +++ b/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go @@ -10,11 +10,12 @@ type truncatingSink struct { // NewTruncatingSink returns a sink 
that truncates strings longer than the max // data string length // Example: -// writerSink := lager.NewWriterSink(os.Stdout, lager.INFO) -// sink := lager.NewTruncatingSink(testSink, 20) -// logger := lager.NewLogger("test") -// logger.RegisterSink(sink) -// logger.Info("message", lager.Data{"A": strings.Repeat("a", 25)}) +// +// writerSink := lager.NewWriterSink(os.Stdout, lager.INFO) +// sink := lager.NewTruncatingSink(testSink, 20) +// logger := lager.NewLogger("test") +// logger.RegisterSink(sink) +// logger.Info("message", lager.Data{"A": strings.Repeat("a", 25)}) func NewTruncatingSink(sink Sink, maxDataStringLength int) Sink { return &truncatingSink{ sink: sink, diff --git a/vendor/modules.txt b/vendor/modules.txt index b0c4b331..cc6fb9c5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -19,8 +19,8 @@ code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/cfnetv1 code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/cfnetv1/internal code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/networkerror code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/wrapper -# code.cloudfoundry.org/cli v0.0.0-20240609151540-b78406a9b0ce -## explicit; go 1.22 +# code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7 +## explicit; go 1.23.6 code.cloudfoundry.org/cli/actor/actionerror code.cloudfoundry.org/cli/actor/sharedaction code.cloudfoundry.org/cli/actor/v7action @@ -86,8 +86,8 @@ code.cloudfoundry.org/jsonry code.cloudfoundry.org/jsonry/internal/errorcontext code.cloudfoundry.org/jsonry/internal/path code.cloudfoundry.org/jsonry/internal/tree -# code.cloudfoundry.org/lager/v3 v3.0.3 -## explicit; go 1.19 +# code.cloudfoundry.org/lager/v3 v3.27.0 +## explicit; go 1.22.0 code.cloudfoundry.org/lager/v3 code.cloudfoundry.org/lager/v3/internal/truncate # code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 @@ -287,8 +287,8 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node 
github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/openzipkin/zipkin-go v0.4.2 -## explicit; go 1.18 +# github.com/openzipkin/zipkin-go v0.4.3 +## explicit; go 1.20 github.com/openzipkin/zipkin-go/idgenerator github.com/openzipkin/zipkin-go/model # github.com/prometheus/client_golang v1.22.0 From a52684f6081f00b536abb4f05c53a45b35c0d718 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 1 May 2025 11:45:07 +0200 Subject: [PATCH 08/13] deactivate completely calls to /v2/spaces/:space_guid/summary --- collectors/applications.go | 22 +++++++++------------- fetcher/fetcher_handlers.go | 25 +------------------------ models/model.go | 2 -- 3 files changed, 10 insertions(+), 39 deletions(-) diff --git a/collectors/applications.go b/collectors/applications.go index a1bba4b0..dd5cf5be 100644 --- a/collectors/applications.go +++ b/collectors/applications.go @@ -285,20 +285,16 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m } } } - } else if len(objs.AppSummaries) > 0 { - if appSummary, ok := objs.AppSummaries[application.GUID]; ok { - runningInstances = appSummary.RunningInstances - } + c.applicationInstancesRunningMetric.WithLabelValues( + application.GUID, + application.Name, + organization.GUID, + organization.Name, + space.GUID, + space.Name, + string(application.State), + ).Set(float64(runningInstances)) } - c.applicationInstancesRunningMetric.WithLabelValues( - application.GUID, - application.Name, - organization.GUID, - organization.Name, - space.GUID, - space.Name, - string(application.State), - ).Set(float64(runningInstances)) c.applicationMemoryMbMetric.WithLabelValues( application.GUID, diff --git a/fetcher/fetcher_handlers.go b/fetcher/fetcher_handlers.go index c73b3625..be7a0787 100644 --- a/fetcher/fetcher_handlers.go +++ b/fetcher/fetcher_handlers.go @@ -1,12 +1,9 @@ package fetcher import ( - "fmt" "regexp" "time" - "github.com/cloudfoundry/cf_exporter/filters" - models2 
"code.cloudfoundry.org/bbs/models" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" @@ -73,30 +70,10 @@ func (c *Fetcher) fetchOrgQuotas(session *SessionExt, _ *BBSClient, entry *model // fetchSpaces // 1. silent fail because space may have been deleted between listing and // summary fetching attempt. See cloudfoundry/cf_exporter#85 -func (c *Fetcher) fetchSpaces(session *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { +func (c *Fetcher) fetchSpaces(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { spaces, _, _, err := session.V3().GetSpaces(LargeQuery) if err == nil { loadIndex(entry.Spaces, spaces, func(r resources.Space) string { return r.GUID }) - if bbs == nil { - total := len(spaces) - for idx := 0; idx < total; idx++ { - space := spaces[idx] - name := fmt.Sprintf("space_summaries %04d/%04d (%s)", idx, total, space.GUID) - c.worker.PushIf(name, func(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { - spaceSum, err := session.GetSpaceSummary(space.GUID) - if err == nil { - c.Lock() - for _, app := range spaceSum.Apps { - entry.AppSummaries[app.GUID] = app - } - c.Unlock() - } else { - log.WithError(err).Warnf("could not fetch space '%s' summary", space.GUID) - } - return nil - }, filters.Applications) - } - } } return err } diff --git a/models/model.go b/models/model.go index 6c46a6ae..a40a80ac 100644 --- a/models/model.go +++ b/models/model.go @@ -33,7 +33,6 @@ type CFObjects struct { ServiceOfferings map[string]resources.ServiceOffering `json:"service_offerings"` ServicePlans map[string]resources.ServicePlan `json:"service_plans"` ServiceBindings map[string]resources.ServiceCredentialBinding `json:"service_bindings"` - AppSummaries map[string]AppSummary `json:"app_summaries"` AppProcesses map[string][]resources.Process `json:"app_processes"` ProcessActualLRPs map[string][]*models.ActualLRP `json:"process_actual_lrps"` Events map[string]Event `json:"events"` @@ -175,7 +174,6 @@ func NewCFObjects() 
*CFObjects { ServiceOfferings: map[string]resources.ServiceOffering{}, ServicePlans: map[string]resources.ServicePlan{}, ServiceBindings: map[string]resources.ServiceCredentialBinding{}, - AppSummaries: map[string]AppSummary{}, AppProcesses: map[string][]resources.Process{}, ProcessActualLRPs: map[string][]*models.ActualLRP{}, Users: map[string]resources.User{}, From 488dd15e11ae46511b20c0a5d842751d1e93110a Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Sat, 3 May 2025 10:44:41 +0200 Subject: [PATCH 09/13] bump code.cloudfoundry.org/cli to v8.13.0 and github.com/cloudfoundry-community/go-cf-clients-helper/v2 to v2.9.0 --- fetcher/bbs_client.go | 6 +- go.mod | 29 +- go.sum | 81 ++- .../cli/actor/v7action/auth.go | 4 +- .../actor/v7action/cloud_controller_client.go | 3 +- .../cli/actor/v7action/deployment.go | 2 +- .../cli/actor/v7action/info.go | 11 +- .../cli/actor/v7action/ssh.go | 2 +- .../cli/actor/v7action/target.go | 2 +- .../cli/api/cloudcontroller/ccv3/client.go | 2 +- .../cli/api/cloudcontroller/ccv3/info.go | 124 +--- .../ccv3/internal/api_routes.go | 2 + .../cli/api/cloudcontroller/ccv3/root.go | 143 ++++ .../ccversion/minimum_version.go | 2 + .../cloudcontroller/wrapper/trace_request.go | 31 + .../cli/api/router/wrapper/trace_request.go | 31 + .../cli/api/shared/trace_headers.go | 37 + .../code.cloudfoundry.org/cli/api/uaa/auth.go | 11 + .../cli/api/uaa/constant/grant_type.go | 2 + .../cli/api/uaa/refresh_token.go | 2 +- .../cli/api/uaa/wrapper/trace_request.go | 33 + .../cli/api/uaa/wrapper/uaa_authentication.go | 1 + .../configuration/coreconfig/config_data.go | 1 + .../cli/command/config.go | 1 + .../cli/resources/deployment_resource.go | 16 +- .../cli/util/configv3/env.go | 10 + .../cli/util/configv3/load_config.go | 1 + .../cli/util/trace/trace.go | 25 + .../go-cf-clients-helper/v2/session.go | 10 +- vendor/github.com/google/gofuzz/.travis.yml | 10 - .../github.com/google/gofuzz/CONTRIBUTING.md | 67 -- vendor/github.com/google/gofuzz/doc.go | 
18 - vendor/github.com/google/gofuzz/fuzz.go | 605 ---------------- vendor/github.com/google/uuid/CHANGELOG.md | 41 ++ vendor/github.com/google/uuid/CONTRIBUTING.md | 26 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 21 + vendor/github.com/google/uuid/dce.go | 80 ++ vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 59 ++ vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 +++ vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/null.go | 118 +++ vendor/github.com/google/uuid/sql.go | 59 ++ vendor/github.com/google/uuid/time.go | 134 ++++ vendor/github.com/google/uuid/util.go | 43 ++ vendor/github.com/google/uuid/uuid.go | 365 ++++++++++ vendor/github.com/google/uuid/version1.go | 44 ++ vendor/github.com/google/uuid/version4.go | 76 ++ vendor/github.com/google/uuid/version6.go | 56 ++ vendor/github.com/google/uuid/version7.go | 104 +++ .../grpc/balancer/base/balancer.go | 12 +- .../endpointsharding/endpointsharding.go | 20 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 43 +- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- vendor/google.golang.org/grpc/clientconn.go | 3 +- .../grpc/health/grpc_health_v1/health.pb.go | 167 ++++- .../health/grpc_health_v1/health_grpc.pb.go | 64 +- .../grpc/internal/envconfig/envconfig.go | 20 +- .../grpc/internal/internal.go | 13 + .../grpc/internal/metadata/metadata.go | 26 +- .../delegatingresolver/delegatingresolver.go | 40 +- .../grpc/internal/transport/client_stream.go | 2 +- .../grpc/internal/transport/http2_client.go | 3 +- .../grpc/internal/transport/http2_server.go | 23 +- .../grpc/internal/transport/server_stream.go | 6 +- vendor/google.golang.org/grpc/resolver/map.go | 174 +++-- .../grpc/resolver_wrapper.go | 23 +- vendor/google.golang.org/grpc/rpc_util.go | 16 +- 
vendor/google.golang.org/grpc/stats/stats.go | 35 +- vendor/google.golang.org/grpc/version.go | 2 +- .../k8s.io/apimachinery/pkg/api/errors/doc.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/doc.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 + .../pkg/api/operation/operation.go | 56 ++ .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +- .../pkg/apis/meta/v1/micro_time_fuzz.go | 13 +- .../pkg/apis/meta/v1/time_fuzz.go | 13 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 31 +- .../apis/meta/v1/unstructured/unstructured.go | 4 +- .../k8s.io/apimachinery/pkg/conversion/doc.go | 2 +- .../pkg/conversion/queryparams/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/fields/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/labels/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 2 +- .../apimachinery/pkg/runtime/interfaces.go | 1 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 39 + .../serializer/cbor/internal/modes/custom.go | 4 +- .../pkg/runtime/serializer/codec_factory.go | 23 +- .../runtime/serializer/json/collections.go | 230 ++++++ .../pkg/runtime/serializer/json/json.go | 16 +- .../serializer/protobuf/collections.go | 174 +++++ .../pkg/runtime/serializer/protobuf/doc.go | 2 +- .../runtime/serializer/protobuf/protobuf.go | 87 ++- .../apimachinery/pkg/runtime/types_proto.go | 127 +++- vendor/k8s.io/apimachinery/pkg/types/doc.go | 2 +- .../apimachinery/pkg/util/errors/doc.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 6 +- .../pkg/util/intstr/instr_fuzz.go | 14 +- .../apimachinery/pkg/util/runtime/runtime.go | 46 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 2 +- .../util/validation/field/error_matcher.go | 212 ++++++ .../pkg/util/validation/field/errors.go | 132 +++- .../apimachinery/pkg/util/validation/ip.go | 278 +++++++ .../pkg/util/validation/validation.go | 40 - .../apimachinery/pkg/util/wait/backoff.go | 50 +- .../k8s.io/apimachinery/pkg/util/wait/doc.go | 2 +- .../k8s.io/apimachinery/pkg/util/wait/loop.go | 4 +- 
.../k8s.io/apimachinery/pkg/util/wait/wait.go | 9 +- .../apimachinery/pkg/util/yaml/decoder.go | 163 +++-- .../pkg/util/yaml/stream_reader.go | 130 ++++ vendor/k8s.io/apimachinery/pkg/version/doc.go | 4 +- .../k8s.io/apimachinery/pkg/version/types.go | 28 +- vendor/k8s.io/apimachinery/pkg/watch/doc.go | 2 +- .../apimachinery/pkg/watch/streamwatcher.go | 15 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 35 +- .../client-go/features/known_features.go | 7 + .../pkg/apis/clientauthentication/doc.go | 2 +- .../pkg/apis/clientauthentication/v1/doc.go | 2 +- .../apis/clientauthentication/v1beta1/doc.go | 2 +- vendor/k8s.io/client-go/pkg/version/doc.go | 2 +- vendor/k8s.io/client-go/rest/.mockery.yaml | 10 + vendor/k8s.io/client-go/rest/client.go | 6 +- vendor/k8s.io/client-go/rest/config.go | 85 ++- vendor/k8s.io/client-go/rest/plugin.go | 7 +- vendor/k8s.io/client-go/rest/request.go | 138 ++-- vendor/k8s.io/client-go/rest/urlbackoff.go | 101 ++- vendor/k8s.io/client-go/rest/warnings.go | 57 +- vendor/k8s.io/client-go/rest/with_retry.go | 12 +- .../client-go/tools/clientcmd/api/doc.go | 2 +- .../client-go/tools/clientcmd/api/v1/doc.go | 2 +- .../k8s.io/client-go/tools/clientcmd/doc.go | 2 +- vendor/k8s.io/client-go/transport/cache.go | 8 +- .../client-go/transport/cert_rotation.go | 17 +- .../client-go/transport/round_trippers.go | 192 +++-- .../client-go/transport/token_source.go | 5 +- .../k8s.io/client-go/transport/transport.go | 2 +- vendor/k8s.io/client-go/util/cert/cert.go | 48 +- .../client-go/util/flowcontrol/backoff.go | 5 +- .../util/workqueue/delaying_queue.go | 19 +- vendor/k8s.io/client-go/util/workqueue/doc.go | 2 +- .../client-go/util/workqueue/parallelizer.go | 2 +- .../k8s.io/utils/clock/testing/fake_clock.go | 362 ---------- .../clock/testing/simple_interval_clock.go | 44 -- vendor/modules.txt | 48 +- vendor/sigs.k8s.io/randfill/CONTRIBUTING.md | 43 ++ .../gofuzz => sigs.k8s.io/randfill}/LICENSE | 6 +- vendor/sigs.k8s.io/randfill/NOTICE | 24 + 
vendor/sigs.k8s.io/randfill/OWNERS | 8 + vendor/sigs.k8s.io/randfill/OWNERS_ALIASES | 14 + .../gofuzz => sigs.k8s.io/randfill}/README.md | 45 +- vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS | 16 + .../randfill}/bytesource/bytesource.go | 0 .../sigs.k8s.io/randfill/code-of-conduct.md | 3 + vendor/sigs.k8s.io/randfill/randfill.go | 682 ++++++++++++++++++ .../v4/value/jsontagutil.go | 63 +- .../v4/value/reflectcache.go | 14 +- 160 files changed, 5599 insertions(+), 1991 deletions(-) create mode 100644 vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/root.go create mode 100644 vendor/code.cloudfoundry.org/cli/api/cloudcontroller/wrapper/trace_request.go create mode 100644 vendor/code.cloudfoundry.org/cli/api/router/wrapper/trace_request.go create mode 100644 vendor/code.cloudfoundry.org/cli/api/shared/trace_headers.go create mode 100644 vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/trace_request.go create mode 100644 vendor/code.cloudfoundry.org/cli/util/trace/trace.go delete mode 100644 vendor/github.com/google/gofuzz/.travis.yml delete mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/gofuzz/doc.go delete mode 100644 vendor/github.com/google/gofuzz/fuzz.go create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 
vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/operation/operation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/ip.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go create mode 100644 vendor/k8s.io/client-go/rest/.mockery.yaml delete mode 100644 vendor/k8s.io/utils/clock/testing/fake_clock.go delete mode 100644 vendor/k8s.io/utils/clock/testing/simple_interval_clock.go create mode 100644 vendor/sigs.k8s.io/randfill/CONTRIBUTING.md rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/LICENSE (99%) create mode 100644 vendor/sigs.k8s.io/randfill/NOTICE create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/README.md (53%) create mode 100644 vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/bytesource/bytesource.go (100%) create mode 100644 vendor/sigs.k8s.io/randfill/code-of-conduct.md create mode 100644 vendor/sigs.k8s.io/randfill/randfill.go diff --git a/fetcher/bbs_client.go b/fetcher/bbs_client.go index 0bd25fe7..a3b64e91 100644 --- 
a/fetcher/bbs_client.go +++ b/fetcher/bbs_client.go @@ -55,10 +55,10 @@ func NewBBSClient(config *BBSConfig) (*BBSClient, error) { if err != nil { return nil, err } - if bbsClient.client.Ping(bbsClient.logger, trace.GenerateTraceID()) { - return &bbsClient, nil + if err = bbsClient.TestConnection(); err != nil { + return nil, fmt.Errorf("error connecting to BBS: %s", err) } - return nil, fmt.Errorf("failed to ping BBS") + return &bbsClient, nil } func (b *BBSClient) GetActualLRPs() ([]*models.ActualLRP, error) { diff --git a/go.mod b/go.mod index c4c8eee9..9447a778 100644 --- a/go.mod +++ b/go.mod @@ -4,10 +4,10 @@ go 1.24.1 require ( code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2 - code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7 - code.cloudfoundry.org/lager/v3 v3.27.0 + code.cloudfoundry.org/cli v0.0.0-20250410033454-7ef8a48b9bb3 + code.cloudfoundry.org/lager/v3 v3.32.0 github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.8.0 + github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.9.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.37.0 github.com/prometheus/client_golang v1.22.0 @@ -16,15 +16,15 @@ require ( ) require ( - code.cloudfoundry.org/bytefmt v0.34.0 // indirect + code.cloudfoundry.org/bytefmt v0.39.0 // indirect code.cloudfoundry.org/cfhttp/v2 v2.44.0 // indirect code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 // indirect - code.cloudfoundry.org/clock v1.32.0 // indirect + code.cloudfoundry.org/clock v1.37.0 // indirect code.cloudfoundry.org/go-log-cache/v2 v2.0.7 // indirect code.cloudfoundry.org/go-loggregator/v9 v9.2.1 // indirect code.cloudfoundry.org/jsonry v1.1.4 // indirect code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 // indirect - code.cloudfoundry.org/tlsconfig v0.22.0 // indirect + code.cloudfoundry.org/tlsconfig v0.26.0 // indirect code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d // 
indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/SermoDigital/jose v0.9.2-0.20161205224733-f6df55f235c2 // indirect @@ -48,8 +48,8 @@ require ( github.com/go-test/deep v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/jackc/pgx/v5 v5.7.4 // indirect github.com/jessevdk/go-flags v1.6.1 // indirect @@ -84,20 +84,21 @@ require ( golang.org/x/text v0.25.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.32.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/grpc v1.71.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect + google.golang.org/grpc v1.72.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.32.3 // indirect - k8s.io/client-go v0.32.3 // indirect + k8s.io/apimachinery v0.33.0 // indirect + k8s.io/client-go v0.33.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect + k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + 
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index d1df5ff6..aacef1df 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,17 @@ code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2 h1:r09wLVYq8X+X1XAJcXgizAdyjptm/tZS4ylWZCg4hzs= code.cloudfoundry.org/bbs v0.0.0-20250414163106-a163a3b524d2/go.mod h1:XKlGVVXFi5EcHHMPzw3xgONK9PeEZuUbIC43XNwxD10= -code.cloudfoundry.org/bytefmt v0.34.0 h1:ErjbaXWjKm6BwdataJUrQxOdtR3uvUoKIXlMaA/smV0= -code.cloudfoundry.org/bytefmt v0.34.0/go.mod h1:U3iZQ5YdK0/0QVYYW+QMLXGjBNXfmZmKt/J2V1eKHvc= +code.cloudfoundry.org/bytefmt v0.39.0 h1:N2h3ADKCTbxrVfWLP/YVx1QHnXQnVozqQuEtMjjecwc= +code.cloudfoundry.org/bytefmt v0.39.0/go.mod h1:1SmOxWKuMQMBDg4w6VOHq3xFT57aX0lZNLR52Eq8+JU= code.cloudfoundry.org/cfhttp/v2 v2.44.0 h1:SHb2oWRrMEigXQCfiXwmSkSZLZSZ+ua0AVbavVcefAU= code.cloudfoundry.org/cfhttp/v2 v2.44.0/go.mod h1:OYSxfFKC0HY7cbeXh2iQVcp4HnbucPBa0naTkOxzKZk= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 h1:Yc9r1p21kEpni9WlG4mwOZw87TB2QlyS9sAEebZ3+ak= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6/go.mod h1:u5FovqC5GGAEbFPz+IdjycDA+gIjhUwqxnu0vbHwVeM= -code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7 h1:H3/+78JZSlecs6B76WkNsChGNIKB+Ns72deBEATGsEw= -code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7/go.mod h1:IdbcFbjs2waQvE3UvDLutHgUz8JiLqF3gW4Xeavs2rk= +code.cloudfoundry.org/cli v0.0.0-20250410033454-7ef8a48b9bb3 h1:C0OnliQ83Z6FNhNu6u1UriYwEo+npBlSyOnB/9Ljs4M= +code.cloudfoundry.org/cli v0.0.0-20250410033454-7ef8a48b9bb3/go.mod h1:NZUvc2yoa/aNmaZ1Ci5w5q5FuKkO0HT/ORManPgaOr4= code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240520170503-e7ed9c7432a0 h1:AqfPzPhykvxSpxMkDdZMSJrCbxMQ0rugvDB3BuqD0DE= code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240520170503-e7ed9c7432a0/go.mod h1:R1EiyOAr7lW0l/YkZNqItUNZ01Q/dYUfbTn4X4Z+82M= -code.cloudfoundry.org/clock v1.32.0 
h1:GlGItvgiaemkPwxb+2/GNUfxiCs+1rjGiqTI8qZKJY8= -code.cloudfoundry.org/clock v1.32.0/go.mod h1:LJLHnDAxkJnhZVx+pbDv2POHvj40W++Skiirqo7Jgbo= +code.cloudfoundry.org/clock v1.37.0 h1:7e/FmrQ8f3cJW1aR4jhKWaEimBp5Ub39dOeNXQHq8HM= +code.cloudfoundry.org/clock v1.37.0/go.mod h1:9bvV2riUok6o34gOGGVIkX1v37wwsZbuSCBx8Y4laL0= code.cloudfoundry.org/diego-ssh v0.0.0-20230810200140-af9d79fe9c82 h1:Bns1y0jSlcvfP0u8ael+TUlnyNHsNX808zuo58bf5so= code.cloudfoundry.org/diego-ssh v0.0.0-20230810200140-af9d79fe9c82/go.mod h1:L2/glHnSK+wKnsG8oZZqdV2sgYY9NDo/I1aDJGhcWaM= code.cloudfoundry.org/go-log-cache/v2 v2.0.7 h1:yR/JjQ/RscO1n4xVAT9HDYcpx5ET/3Cq2/RhpJml6ZU= @@ -22,12 +22,12 @@ code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTg code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= code.cloudfoundry.org/jsonry v1.1.4 h1:P9N7IlH1/4aRCLcXLgLFj1hkcBmV7muijJzY+K6U4hE= code.cloudfoundry.org/jsonry v1.1.4/go.mod h1:6aKilShQP7w/Ez76h1El2/n9y2OkHuU56nKSBB9Gp0A= -code.cloudfoundry.org/lager/v3 v3.27.0 h1:LZ/cxraneE84Rq7J7Z6lDA7x/nbe5iOM90yKJmVMhAU= -code.cloudfoundry.org/lager/v3 v3.27.0/go.mod h1:f8QnZ+7h8JXO/qujwTKVbuPj83gJuVJEXuet/1DTjjc= +code.cloudfoundry.org/lager/v3 v3.32.0 h1:2OmBpTkX17PSFhkPny1P9K642Anwq5OsdpU5vGyerVc= +code.cloudfoundry.org/lager/v3 v3.32.0/go.mod h1:8agmjq9SQseFS7XGTm4iAf6Rn76V7JKfRe60wEMvkCQ= code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 h1:m6Zwwr6HjmdXS/EGwIhar0N6ExQZvmqYSC23MNE+5jc= code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201/go.mod h1:AwHLRkdXtttLXNB8RHgLfErJ2kKafH62AR2OClhy6xI= -code.cloudfoundry.org/tlsconfig v0.22.0 h1:zgzDd4lp++vov8azKP1LdAOBEViPRwm1lg67FHJ4W7Q= -code.cloudfoundry.org/tlsconfig v0.22.0/go.mod h1:RX++v+3sJ7bCv0rhFAMryoDAeQeGWJv7bXdwK7DfIzo= +code.cloudfoundry.org/tlsconfig v0.26.0 h1:M8mdkKbOv+y6I6bOIsVCJzYl0BGykIhf9o4EOb096ME= +code.cloudfoundry.org/tlsconfig v0.26.0/go.mod 
h1:NOe+islgBrrY5Z/kzbTTb4P9xmjXhcSwHNSKRYB1PYs= code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d h1:M+zXqtXJqcsmpL76aU0tdl1ho23eYa4axYoM4gD62UA= code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d/go.mod h1:YUJiVOr5xl0N/RjMxM1tHmgSpBbi5UM+KoVR5AoejO0= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= @@ -56,8 +56,8 @@ github.com/charlievieth/fs v0.0.3/go.mod h1:hD4sRzto1Hw8zCua76tNVKZxaeZZr1RiKftj github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.8.0 h1:3H97RCKWgGEEZbL+skOSuhIcv9ZaOoMgdtTUzp9hN3s= -github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.8.0/go.mod h1:m/LWQprhLCXxnHoVSIJ3Wt3VDr3dlpCgbTGbB/lA+rk= +github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.9.0 h1:uBhZQaz0BwmI48uwwC1dZs+RAjK6VKtEWtKTIDXl/kU= +github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.9.0/go.mod h1:rxFVNLI47ba21fZU2asbBuN2rHlInTBPcyvoRpDd2CM= github.com/cloudfoundry/bosh-cli v6.4.1+incompatible h1:n5/+NIF9QxvGINOrjh6DmO+GTen78MoCj5+LU9L8bR4= github.com/cloudfoundry/bosh-cli v6.4.1+incompatible/go.mod h1:rzIB+e1sn7wQL/TJ54bl/FemPKRhXby5BIMS3tLuWFM= github.com/cloudfoundry/bosh-utils v0.0.538 h1:5J99nCN0NynRK9lUcIAXeswW4pv2bkhyLLevajjHb/s= @@ -112,8 +112,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= 
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -125,8 +125,6 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 h1:gD0vax+4I+mAj+jEChEf25Ia07Jq7kYOFO5PPhAxFl4= github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= @@ -209,9 +207,8 @@ github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7s github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= @@ -225,8 +222,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0= github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -236,6 +233,8 @@ github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/square/certstrap v1.3.0 h1:N9P0ZRA+DjT8pq5fGDj0z3FjafRKnBDypP0QHpMlaAk= github.com/square/certstrap v1.3.0/go.mod h1:wGZo9eE1B7WX2GKBn0htJ+B3OuRl2UsdCFySNooy9hU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -269,8 +268,8 @@ go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.step.sm/crypto v0.59.2 h1:pW6qsBW03hd/OFHi2j7hF4sWnEJ0u7rn3reDSqPb46c= -go.step.sm/crypto v0.59.2/go.mod h1:bPwUACtYU1CR5ohZTetjBz9CfyF9qql3LllAjw+t3rs= +go.step.sm/crypto v0.62.0 h1:ulZjt+7tkE4f+sUdGevnRqh5/6GRkex8/lWbpNKLDZY= +go.step.sm/crypto v0.62.0/go.mod h1:mEI+M+m1s4AKiqTm6NBX5+X3uQE2hO4bhEUJBdIQZpQ= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -354,12 +353,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34 h1:0PeQib/pH3nB/5pEmFeVQJotzGohV0dq4Vcp09H5yhE= +google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34/go.mod h1:0awUlEkap+Pb1UMeJwJQQAdJQrt3moU7J2moTy69irI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -393,22 +392,24 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.4.0-0.dev/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apimachinery v0.32.3 
h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg= +k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod 
h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/auth.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/auth.go index aef734c2..6c03cc4f 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/auth.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/auth.go @@ -25,7 +25,7 @@ func NewDefaultAuthActor(config Config, uaaClient UAAClient) AuthActor { } func (actor defaultAuthActor) Authenticate(credentials map[string]string, origin string, grantType constant.GrantType) error { - if grantType == constant.GrantTypePassword && actor.config.UAAGrantType() == string(constant.GrantTypeClientCredentials) { + if (grantType == constant.GrantTypePassword || grantType == constant.GrantTypeJwtBearer) && actor.config.UAAGrantType() == string(constant.GrantTypeClientCredentials) { return actionerror.PasswordGrantTypeLogoutRequiredError{} } @@ -45,7 +45,7 @@ func (actor defaultAuthActor) Authenticate(credentials map[string]string, origin actor.config.SetUAAGrantType(string(grantType)) } - if grantType == constant.GrantTypeClientCredentials { + if (grantType == constant.GrantTypeClientCredentials || grantType == constant.GrantTypeJwtBearer) && credentials["client_id"] != "" { 
actor.config.SetUAAClientCredentials(credentials["client_id"], "") } diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go index 1350fd09..34b1d3e3 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/cloud_controller_client.go @@ -88,6 +88,7 @@ type CloudControllerClient interface { GetEvents(query ...ccv3.Query) ([]ccv3.Event, ccv3.Warnings, error) GetFeatureFlag(featureFlagName string) (resources.FeatureFlag, ccv3.Warnings, error) GetFeatureFlags() ([]resources.FeatureFlag, ccv3.Warnings, error) + GetRoot() (ccv3.Root, ccv3.Warnings, error) GetInfo() (ccv3.Info, ccv3.Warnings, error) GetIsolationSegment(guid string) (resources.IsolationSegment, ccv3.Warnings, error) GetIsolationSegmentOrganizations(isolationSegmentGUID string) ([]resources.Organization, ccv3.Warnings, error) @@ -148,7 +149,7 @@ type CloudControllerClient interface { PollJobToEventStream(jobURL ccv3.JobURL) chan ccv3.PollJobEvent PurgeServiceOffering(serviceOfferingGUID string) (ccv3.Warnings, error) ResourceMatch(resources []ccv3.Resource) ([]ccv3.Resource, ccv3.Warnings, error) - RootResponse() (ccv3.Info, ccv3.Warnings, error) + RootResponse() (ccv3.Root, ccv3.Warnings, error) SetApplicationDroplet(appGUID string, dropletGUID string) (resources.Relationship, ccv3.Warnings, error) SharePrivateDomainToOrgs(domainGuid string, sharedOrgs ccv3.SharedOrgs) (ccv3.Warnings, error) ShareServiceInstanceToSpaces(serviceInstanceGUID string, spaceGUIDs []string) (resources.RelationshipList, ccv3.Warnings, error) diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go index e21873d8..95faadb6 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go +++ 
b/vendor/code.cloudfoundry.org/cli/actor/v7action/deployment.go @@ -29,7 +29,7 @@ func (actor Actor) GetLatestActiveDeploymentForApp(appGUID string) (resources.De return resources.Deployment{}, Warnings(warnings), actionerror.ActiveDeploymentNotFoundError{} } - return resources.Deployment(ccDeployments[0]), Warnings(warnings), nil + return ccDeployments[0], Warnings(warnings), nil } func (actor Actor) CancelDeployment(deploymentGUID string) (Warnings, error) { diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/info.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/info.go index 723c1b58..909f0c66 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/info.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/info.go @@ -2,9 +2,18 @@ package v7action import "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" +type Root ccv3.Root type Info ccv3.Info -func (actor Actor) GetRootResponse() (Info, Warnings, error) { +func (actor Actor) GetRootResponse() (Root, Warnings, error) { + root, warnings, err := actor.CloudControllerClient.GetRoot() + if err != nil { + return Root{}, Warnings(warnings), err + } + return Root(root), Warnings(warnings), nil +} + +func (actor Actor) GetInfoResponse() (Info, Warnings, error) { info, warnings, err := actor.CloudControllerClient.GetInfo() if err != nil { return Info{}, Warnings(warnings), err diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/ssh.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/ssh.go index 30fcc6ff..67f1ecd8 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/ssh.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/ssh.go @@ -25,7 +25,7 @@ func (actor Actor) GetSecureShellConfigurationByApplicationNameSpaceProcessTypeA ) (SSHAuthentication, Warnings, error) { var allWarnings Warnings - rootInfo, warnings, err := actor.CloudControllerClient.GetInfo() + rootInfo, warnings, err := actor.CloudControllerClient.GetRoot() allWarnings = append(allWarnings, warnings...) 
if err != nil { return SSHAuthentication{}, allWarnings, err diff --git a/vendor/code.cloudfoundry.org/cli/actor/v7action/target.go b/vendor/code.cloudfoundry.org/cli/actor/v7action/target.go index a63a0271..01c076a6 100644 --- a/vendor/code.cloudfoundry.org/cli/actor/v7action/target.go +++ b/vendor/code.cloudfoundry.org/cli/actor/v7action/target.go @@ -14,7 +14,7 @@ func (actor Actor) SetTarget(settings TargetSettings) (Warnings, error) { actor.CloudControllerClient.TargetCF(ccv3.TargetSettings(settings)) - rootInfo, warnings, err := actor.CloudControllerClient.GetInfo() + rootInfo, warnings, err := actor.CloudControllerClient.GetRoot() allWarnings = append(allWarnings, warnings...) if err != nil { return allWarnings, err diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go index 94cddc0b..148b2b28 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/client.go @@ -86,7 +86,7 @@ type Warnings []string // Client can be used to talk to a Cloud Controller's V3 Endpoints. type Client struct { - Info + Root CloudControllerURL string Requester diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/info.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/info.go index 564834a8..8434eb61 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/info.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/info.go @@ -3,134 +3,22 @@ package ccv3 import ( "net/http" - "code.cloudfoundry.org/cli/api/cloudcontroller" "code.cloudfoundry.org/cli/api/cloudcontroller/ccerror" - "code.cloudfoundry.org/cli/resources" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal" ) -type InfoLinks struct { - // AppSSH is the link for application ssh info. 
- AppSSH resources.APILink `json:"app_ssh"` - - // CCV3 is the link to the Cloud Controller V3 API. - CCV3 resources.APILink `json:"cloud_controller_v3"` - - // Logging is the link to the Logging API. - Logging resources.APILink `json:"logging"` - - // Logging is the link to the Logging API. - LogCache resources.APILink `json:"log_cache"` - - // NetworkPolicyV1 is the link to the Container to Container Networking - // API. - NetworkPolicyV1 resources.APILink `json:"network_policy_v1"` - - // Routing is the link to the routing API - Routing resources.APILink `json:"routing"` - - // UAA is the link to the UAA API. - UAA resources.APILink `json:"uaa"` - - // Login is the link to the Login API. - Login resources.APILink `json:"login"` -} - -// Info represents a GET response from the '/' endpoint of the cloud -// controller API. type Info struct { - // Links is a list of top level Cloud Controller APIs. - Links InfoLinks `json:"links"` - CFOnK8s bool `json:"cf_on_k8s"` -} - -// AppSSHEndpoint returns the HREF for SSHing into an app container. -func (info Info) AppSSHEndpoint() string { - return info.Links.AppSSH.HREF -} - -// AppSSHHostKeyFingerprint returns the SSH key fingerprint of the SSH proxy -// that brokers connections to application instances. -func (info Info) AppSSHHostKeyFingerprint() string { - return info.Links.AppSSH.Meta.HostKeyFingerprint -} - -// CloudControllerAPIVersion returns the version of the CloudController. -func (info Info) CloudControllerAPIVersion() string { - return info.Links.CCV3.Meta.Version -} - -// LogCache returns the HREF of the Loggregator Traffic Controller. -func (info Info) LogCache() string { - return info.Links.LogCache.HREF -} - -// Logging returns the HREF of the Loggregator Traffic Controller. 
-func (info Info) Logging() string { - return info.Links.Logging.HREF -} - -// NetworkPolicyV1 returns the HREF of the Container Networking v1 Policy API -func (info Info) NetworkPolicyV1() string { - return info.Links.NetworkPolicyV1.HREF + Name string `json:"name"` + Build string `json:"build"` + OSBAPIVersion string `json:"osbapi_version"` } -// OAuthClient returns the oauth client ID of the SSH proxy that brokers -// connections to application instances. -func (info Info) OAuthClient() string { - return info.Links.AppSSH.Meta.OAuthClient -} - -// Routing returns the HREF of the routing API. -func (info Info) Routing() string { - return info.Links.Routing.HREF -} - -// UAA returns the HREF of the UAA server. -func (info Info) UAA() string { - return info.Links.UAA.HREF -} - -// Login returns the HREF of the login server. -func (info Info) Login() string { - return info.Links.Login.HREF -} - -// ResourceLinks represents the information returned back from /v3. -type ResourceLinks map[string]resources.APILink - -// UnmarshalJSON helps unmarshal a Cloud Controller /v3 response. -func (links ResourceLinks) UnmarshalJSON(data []byte) error { - var ccResourceLinks struct { - Links map[string]resources.APILink `json:"links"` - } - err := cloudcontroller.DecodeJSON(data, &ccResourceLinks) - if err != nil { - return err - } - - for key, val := range ccResourceLinks.Links { - links[key] = val - } - - return nil -} - -// GetInfo returns endpoint and API information from /v3. +// GetRoot returns the /v3/info response func (client *Client) GetInfo() (Info, Warnings, error) { - rootResponse, warnings, err := client.RootResponse() - if err != nil { - return Info{}, warnings, err - } - - return rootResponse, warnings, err -} - -// rootResponse returns the CC API root document. 
-func (client *Client) RootResponse() (Info, Warnings, error) { var responseBody Info _, warnings, err := client.MakeRequest(RequestParams{ - URL: client.CloudControllerURL, + RequestName: internal.Info, ResponseBody: &responseBody, }) diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go index fbaf2a9c..5ba7deda 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/internal/api_routes.go @@ -181,6 +181,7 @@ const ( UnshareRouteRequest = "UnshareRoute" UpdateRouteRequest = "UpdateRoute" WhoAmI = "WhoAmI" + Info = "Info" ) // APIRoutes is a list of routes used by the router to construct request URLs. @@ -357,4 +358,5 @@ var APIRoutes = map[string]Route{ PostUserRequest: {Path: "/v3/users", Method: http.MethodPost}, DeleteUserRequest: {Path: "/v3/users/:user_guid", Method: http.MethodDelete}, WhoAmI: {Path: "/whoami", Method: http.MethodGet}, + Info: {Path: "/v3/info", Method: http.MethodGet}, } diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/root.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/root.go new file mode 100644 index 00000000..79eed501 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/root.go @@ -0,0 +1,143 @@ +package ccv3 + +import ( + "net/http" + + "code.cloudfoundry.org/cli/api/cloudcontroller" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccerror" + "code.cloudfoundry.org/cli/resources" +) + +type RootLinks struct { + // AppSSH is the link for application ssh info. + AppSSH resources.APILink `json:"app_ssh"` + + // CCV3 is the link to the Cloud Controller V3 API. + CCV3 resources.APILink `json:"cloud_controller_v3"` + + // Logging is the link to the Logging API. + Logging resources.APILink `json:"logging"` + + // Logging is the link to the Logging API. 
+ LogCache resources.APILink `json:"log_cache"` + + // NetworkPolicyV1 is the link to the Container to Container Networking + // API. + NetworkPolicyV1 resources.APILink `json:"network_policy_v1"` + + // Routing is the link to the routing API + Routing resources.APILink `json:"routing"` + + // UAA is the link to the UAA API. + UAA resources.APILink `json:"uaa"` + + // Login is the link to the Login API. + Login resources.APILink `json:"login"` +} + +// Root represents a GET response from the '/' endpoint of the cloud +// controller API. +type Root struct { + // Links is a list of top level Cloud Controller APIs. + Links RootLinks `json:"links"` + CFOnK8s bool `json:"cf_on_k8s"` +} + +// AppSSHEndpoint returns the HREF for SSHing into an app container. +func (info Root) AppSSHEndpoint() string { + return info.Links.AppSSH.HREF +} + +// AppSSHHostKeyFingerprint returns the SSH key fingerprint of the SSH proxy +// that brokers connections to application instances. +func (info Root) AppSSHHostKeyFingerprint() string { + return info.Links.AppSSH.Meta.HostKeyFingerprint +} + +// CloudControllerAPIVersion returns the version of the CloudController. +func (info Root) CloudControllerAPIVersion() string { + return info.Links.CCV3.Meta.Version +} + +// LogCache returns the HREF of the Loggregator Traffic Controller. +func (info Root) LogCache() string { + return info.Links.LogCache.HREF +} + +// Logging returns the HREF of the Loggregator Traffic Controller. +func (info Root) Logging() string { + return info.Links.Logging.HREF +} + +// NetworkPolicyV1 returns the HREF of the Container Networking v1 Policy API +func (info Root) NetworkPolicyV1() string { + return info.Links.NetworkPolicyV1.HREF +} + +// OAuthClient returns the oauth client ID of the SSH proxy that brokers +// connections to application instances. +func (info Root) OAuthClient() string { + return info.Links.AppSSH.Meta.OAuthClient +} + +// Routing returns the HREF of the routing API. 
+func (info Root) Routing() string { + return info.Links.Routing.HREF +} + +// UAA returns the HREF of the UAA server. +func (info Root) UAA() string { + return info.Links.UAA.HREF +} + +// Login returns the HREF of the login server. +func (info Root) Login() string { + return info.Links.Login.HREF +} + +// ResourceLinks represents the information returned back from /v3. +type ResourceLinks map[string]resources.APILink + +// UnmarshalJSON helps unmarshal a Cloud Controller /v3 response. +func (links ResourceLinks) UnmarshalJSON(data []byte) error { + var ccResourceLinks struct { + Links map[string]resources.APILink `json:"links"` + } + err := cloudcontroller.DecodeJSON(data, &ccResourceLinks) + if err != nil { + return err + } + + for key, val := range ccResourceLinks.Links { + links[key] = val + } + + return nil +} + +// GetRoot returns endpoint and API information from /v3. +func (client *Client) GetRoot() (Root, Warnings, error) { + rootResponse, warnings, err := client.RootResponse() + if err != nil { + return Root{}, warnings, err + } + + return rootResponse, warnings, err +} + +// RootResponse returns the CC API root document. 
+func (client *Client) RootResponse() (Root, Warnings, error) { + var responseBody Root + + _, warnings, err := client.MakeRequest(RequestParams{ + URL: client.CloudControllerURL, + ResponseBody: &responseBody, + }) + + unknownSourceErr, ok := err.(ccerror.UnknownHTTPSourceError) + if ok && unknownSourceErr.StatusCode == http.StatusNotFound { + return Root{}, nil, ccerror.APINotFoundError{URL: client.CloudControllerURL} + } + + return responseBody, warnings, err +} diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go index fcee5cdb..68f6d37a 100644 --- a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/ccversion/minimum_version.go @@ -18,4 +18,6 @@ const ( MinVersionCNB = "3.168.0" MinVersionPerRouteOpts = "3.183.0" + + MinVersionCanarySteps = "3.189.0" ) diff --git a/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/wrapper/trace_request.go b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/wrapper/trace_request.go new file mode 100644 index 00000000..73f6fe47 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/api/cloudcontroller/wrapper/trace_request.go @@ -0,0 +1,31 @@ +package wrapper + +import ( + "code.cloudfoundry.org/cli/api/cloudcontroller" + "code.cloudfoundry.org/cli/api/shared" +) + +// CCTraceHeaderRequest is a wrapper that adds b3 trace headers to requests. +type CCTraceHeaderRequest struct { + headers *shared.TraceHeaders + connection cloudcontroller.Connection +} + +// NewCCTraceHeaderRequest returns a pointer to a CCTraceHeaderRequest wrapper. 
+func NewCCTraceHeaderRequest(trace string) *CCTraceHeaderRequest { + return &CCTraceHeaderRequest{ + headers: shared.NewTraceHeaders(trace), + } +} + +// Add tracing headers +func (t *CCTraceHeaderRequest) Make(request *cloudcontroller.Request, passedResponse *cloudcontroller.Response) error { + t.headers.SetHeaders(request.Request) + return t.connection.Make(request, passedResponse) +} + +// Wrap sets the connection in the CCTraceHeaderRequest and returns itself. +func (t *CCTraceHeaderRequest) Wrap(innerconnection cloudcontroller.Connection) cloudcontroller.Connection { + t.connection = innerconnection + return t +} diff --git a/vendor/code.cloudfoundry.org/cli/api/router/wrapper/trace_request.go b/vendor/code.cloudfoundry.org/cli/api/router/wrapper/trace_request.go new file mode 100644 index 00000000..a96a1ce2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/api/router/wrapper/trace_request.go @@ -0,0 +1,31 @@ +package wrapper + +import ( + "code.cloudfoundry.org/cli/api/router" + "code.cloudfoundry.org/cli/api/shared" +) + +// RoutingTraceHeaderRequest is a wrapper that adds b3 trace headers to requests. +type RoutingTraceHeaderRequest struct { + headers *shared.TraceHeaders + connection router.Connection +} + +// NewRoutingTraceHeaderRequest returns a pointer to a RoutingTraceHeaderRequest wrapper. +func NewRoutingTraceHeaderRequest(trace string) *RoutingTraceHeaderRequest { + return &RoutingTraceHeaderRequest{ + headers: shared.NewTraceHeaders(trace), + } +} + +// Add tracing headers +func (t *RoutingTraceHeaderRequest) Make(request *router.Request, passedResponse *router.Response) error { + t.headers.SetHeaders(request.Request) + return t.connection.Make(request, passedResponse) +} + +// Wrap sets the connection in the RoutingTraceHeaderRequest and returns itself. 
+func (t *RoutingTraceHeaderRequest) Wrap(innerconnection router.Connection) router.Connection { + t.connection = innerconnection + return t +} diff --git a/vendor/code.cloudfoundry.org/cli/api/shared/trace_headers.go b/vendor/code.cloudfoundry.org/cli/api/shared/trace_headers.go new file mode 100644 index 00000000..e913b5c6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/api/shared/trace_headers.go @@ -0,0 +1,37 @@ +package shared + +import ( + "net/http" + + "code.cloudfoundry.org/cli/util/trace" +) + +const ( + B3TraceIDHeader = "X-B3-TraceId" + B3SpanIDHeader = "X-B3-SpanId" +) + +// TraceHeaders sets b3 trace headers to requests. +type TraceHeaders struct { + b3trace string +} + +// NewTraceHeaders returns a pointer to a TraceHeaderRequest. +func NewTraceHeaders(trace string) *TraceHeaders { + return &TraceHeaders{ + b3trace: trace, + } +} + +// Add tracing headers if they are not already set. +func (t *TraceHeaders) SetHeaders(request *http.Request) { + // only override the trace headers if they are not already set (e.g. 
already explicitly set by cf curl) + if request.Header.Get(B3TraceIDHeader) == "" { + request.Header.Add(B3TraceIDHeader, t.b3trace) + } + if request.Header.Get(B3SpanIDHeader) == "" { + request.Header.Add(B3SpanIDHeader, trace.GenerateRandomTraceID(16)) + } + + // request.Header.Add(("B3", request.Header.Get(B3TraceIDHeader)+request.Header.Get(B3SpanIDHeader))) +} diff --git a/vendor/code.cloudfoundry.org/cli/api/uaa/auth.go b/vendor/code.cloudfoundry.org/cli/api/uaa/auth.go index 1e9ddf6a..42e7c7c4 100644 --- a/vendor/code.cloudfoundry.org/cli/api/uaa/auth.go +++ b/vendor/code.cloudfoundry.org/cli/api/uaa/auth.go @@ -62,6 +62,17 @@ func (client Client) Authenticate(creds map[string]string, origin string, grantT if grantType == constant.GrantTypePassword { request.SetBasicAuth(client.config.UAAOAuthClient(), client.config.UAAOAuthClientSecret()) + } else if grantType == constant.GrantTypeJwtBearer { + // overwrite client authentication in case of provided parameters in cf auth clientid clientsecret or use defaults as done in password grant + clientId := client.config.UAAOAuthClient() + clientSecret := client.config.UAAOAuthClientSecret() + if creds["client_id"] != "" { + clientId = creds["client_id"] + } + if creds["client_secret"] != "" { + clientSecret = creds["client_secret"] + } + request.SetBasicAuth(clientId, clientSecret) } responseBody := AuthResponse{} diff --git a/vendor/code.cloudfoundry.org/cli/api/uaa/constant/grant_type.go b/vendor/code.cloudfoundry.org/cli/api/uaa/constant/grant_type.go index e302d2f6..7cc92cc1 100644 --- a/vendor/code.cloudfoundry.org/cli/api/uaa/constant/grant_type.go +++ b/vendor/code.cloudfoundry.org/cli/api/uaa/constant/grant_type.go @@ -10,4 +10,6 @@ const ( // GrantTypePassword is used for user's username/password authentication. 
GrantTypePassword GrantType = "password" GrantTypeRefreshToken GrantType = "refresh_token" + // GrantTypeJwtBearer is used for token based user authentication + GrantTypeJwtBearer GrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" ) diff --git a/vendor/code.cloudfoundry.org/cli/api/uaa/refresh_token.go b/vendor/code.cloudfoundry.org/cli/api/uaa/refresh_token.go index fba19e21..18407142 100644 --- a/vendor/code.cloudfoundry.org/cli/api/uaa/refresh_token.go +++ b/vendor/code.cloudfoundry.org/cli/api/uaa/refresh_token.go @@ -29,7 +29,7 @@ func (client *Client) RefreshAccessToken(refreshToken string) (RefreshedTokens, switch client.config.UAAGrantType() { case string(constant.GrantTypeClientCredentials): values = client.clientCredentialRefreshBody() - case "", string(constant.GrantTypePassword): // CLI used to write empty string for grant type in the case of password; preserve compatibility with old config.json files + case "", string(constant.GrantTypePassword), string(constant.GrantTypeJwtBearer): // CLI used to write empty string for grant type in the case of password; preserve compatibility with old config.json files values = client.refreshTokenBody(refreshToken) } diff --git a/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/trace_request.go b/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/trace_request.go new file mode 100644 index 00000000..dedebf6e --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/trace_request.go @@ -0,0 +1,33 @@ +package wrapper + +import ( + "net/http" + + "code.cloudfoundry.org/cli/api/shared" + "code.cloudfoundry.org/cli/api/uaa" +) + +// UAATraceHeaderRequest is a wrapper that adds b3 trace headers to requests. +type UAATraceHeaderRequest struct { + headers *shared.TraceHeaders + connection uaa.Connection +} + +// NewUAATraceHeaderRequest returns a pointer to a UAATraceHeaderRequest wrapper. 
+func NewUAATraceHeaderRequest(trace string) *UAATraceHeaderRequest { + return &UAATraceHeaderRequest{ + headers: shared.NewTraceHeaders(trace), + } +} + +// Add tracing headers +func (t *UAATraceHeaderRequest) Make(request *http.Request, passedResponse *uaa.Response) error { + t.headers.SetHeaders(request) + return t.connection.Make(request, passedResponse) +} + +// Wrap sets the connection in the UAATraceHeaderRequest and returns itself. +func (t *UAATraceHeaderRequest) Wrap(innerconnection uaa.Connection) uaa.Connection { + t.connection = innerconnection + return t +} diff --git a/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/uaa_authentication.go b/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/uaa_authentication.go index 96bc7e64..25f25c59 100644 --- a/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/uaa_authentication.go +++ b/vendor/code.cloudfoundry.org/cli/api/uaa/wrapper/uaa_authentication.go @@ -109,5 +109,6 @@ func skipAuthenticationHeader(request *http.Request, body []byte) bool { request.Method == http.MethodPost && (strings.Contains(stringBody, "grant_type=refresh_token") || strings.Contains(stringBody, "grant_type=password") || + strings.Contains(stringBody, "grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer") || strings.Contains(stringBody, "grant_type=client_credentials")) } diff --git a/vendor/code.cloudfoundry.org/cli/cf/configuration/coreconfig/config_data.go b/vendor/code.cloudfoundry.org/cli/cf/configuration/coreconfig/config_data.go index a5314a12..f017333c 100644 --- a/vendor/code.cloudfoundry.org/cli/cf/configuration/coreconfig/config_data.go +++ b/vendor/code.cloudfoundry.org/cli/cf/configuration/coreconfig/config_data.go @@ -33,6 +33,7 @@ type Data struct { LogCacheEndPoint string MinCLIVersion string MinRecommendedCLIVersion string + NetworkPolicyV1Endpoint string OrganizationFields models.OrganizationFields PluginRepos []models.PluginRepo RefreshToken string diff --git 
a/vendor/code.cloudfoundry.org/cli/command/config.go b/vendor/code.cloudfoundry.org/cli/command/config.go index 676223c7..bde3b4a7 100644 --- a/vendor/code.cloudfoundry.org/cli/command/config.go +++ b/vendor/code.cloudfoundry.org/cli/command/config.go @@ -15,6 +15,7 @@ type Config interface { AddPluginRepository(name string, url string) AuthorizationEndpoint() string APIVersion() string + B3TraceID() string BinaryName() string BinaryVersion() string CFPassword() string diff --git a/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go b/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go index 81fa6fe9..65f97551 100644 --- a/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go +++ b/vendor/code.cloudfoundry.org/cli/resources/deployment_resource.go @@ -12,6 +12,7 @@ type Deployment struct { State constant.DeploymentState StatusValue constant.DeploymentStatusValue StatusReason constant.DeploymentStatusReason + CanaryStatus CanaryStatus LastStatusChange string Options DeploymentOpts RevisionGUID string @@ -78,6 +79,15 @@ func (d Deployment) MarshalJSON() ([]byte, error) { return json.Marshal(ccDeployment) } +type CanaryStepStatus struct { + CurrentStep int `json:"current"` + TotalSteps int `json:"total"` +} + +type CanaryStatus struct { + Steps CanaryStepStatus `json:"steps"` +} + // UnmarshalJSON helps unmarshal a Cloud Controller Deployment response. 
func (d *Deployment) UnmarshalJSON(data []byte) error { var ccDeployment struct { @@ -89,8 +99,9 @@ func (d *Deployment) UnmarshalJSON(data []byte) error { Details struct { LastStatusChange string `json:"last_status_change"` } - Value constant.DeploymentStatusValue `json:"value"` - Reason constant.DeploymentStatusReason `json:"reason"` + Value constant.DeploymentStatusValue `json:"value"` + Reason constant.DeploymentStatusReason `json:"reason"` + CanaryStatus CanaryStatus `json:"canary,omitempty"` } `json:"status"` Droplet Droplet `json:"droplet,omitempty"` NewProcesses []Process `json:"new_processes,omitempty"` @@ -109,6 +120,7 @@ func (d *Deployment) UnmarshalJSON(data []byte) error { d.State = ccDeployment.State d.StatusValue = ccDeployment.Status.Value d.StatusReason = ccDeployment.Status.Reason + d.CanaryStatus = ccDeployment.Status.CanaryStatus d.LastStatusChange = ccDeployment.Status.Details.LastStatusChange d.DropletGUID = ccDeployment.Droplet.GUID d.NewProcesses = ccDeployment.NewProcesses diff --git a/vendor/code.cloudfoundry.org/cli/util/configv3/env.go b/vendor/code.cloudfoundry.org/cli/util/configv3/env.go index 27e15504..75e198cf 100644 --- a/vendor/code.cloudfoundry.org/cli/util/configv3/env.go +++ b/vendor/code.cloudfoundry.org/cli/util/configv3/env.go @@ -5,6 +5,8 @@ import ( "strconv" "strings" "time" + + "code.cloudfoundry.org/cli/util/trace" ) // EnvOverride represents all the environment variables read by the CF CLI @@ -20,6 +22,7 @@ type EnvOverride struct { CFStartupTimeout string CFTrace string CFUsername string + CFB3TraceID string DockerPassword string CNBCredentials string Experimental string @@ -160,3 +163,10 @@ func (config *Config) StartupTimeout() time.Duration { return DefaultStartupTimeout } + +func (config *Config) B3TraceID() string { + if config.ENV.CFB3TraceID == "" { + config.ENV.CFB3TraceID = trace.GenerateUUIDTraceID() + } + return config.ENV.CFB3TraceID +} diff --git 
a/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go b/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go index d0a0c8b5..b6ee0fa2 100644 --- a/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go +++ b/vendor/code.cloudfoundry.org/cli/util/configv3/load_config.go @@ -127,6 +127,7 @@ func LoadConfig(flags ...FlagOverride) (*Config, error) { CFStartupTimeout: os.Getenv("CF_STARTUP_TIMEOUT"), CFTrace: os.Getenv("CF_TRACE"), CFUsername: os.Getenv("CF_USERNAME"), + CFB3TraceID: os.Getenv("CF_B3_TRACE_ID"), DockerPassword: os.Getenv("CF_DOCKER_PASSWORD"), CNBCredentials: os.Getenv("CF_CNB_REGISTRY_CREDS"), Experimental: os.Getenv("CF_CLI_EXPERIMENTAL"), diff --git a/vendor/code.cloudfoundry.org/cli/util/trace/trace.go b/vendor/code.cloudfoundry.org/cli/util/trace/trace.go new file mode 100644 index 00000000..3bd530f1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cli/util/trace/trace.go @@ -0,0 +1,25 @@ +package trace + +import ( + "crypto/rand" + "encoding/hex" + "strings" + + "github.com/google/uuid" +) + +// GenerateUUIDTraceID returns a UUID v4 string with the dashes removed as a 32 lower-hex encoded string. +func GenerateUUIDTraceID() string { + uuidV4 := uuid.New() + return strings.ReplaceAll(uuidV4.String(), "-", "") +} + +// GenerateRandomTraceID returns a random hex string of the given length. 
+func GenerateRandomTraceID(length int) string { + b := make([]byte, length/2) + if _, err := rand.Read(b); err != nil { + panic(err) + } + + return hex.EncodeToString(b) +} diff --git a/vendor/github.com/cloudfoundry-community/go-cf-clients-helper/v2/session.go b/vendor/github.com/cloudfoundry-community/go-cf-clients-helper/v2/session.go index c45c45c3..d5e84798 100644 --- a/vendor/github.com/cloudfoundry-community/go-cf-clients-helper/v2/session.go +++ b/vendor/github.com/cloudfoundry-community/go-cf-clients-helper/v2/session.go @@ -138,9 +138,9 @@ func (s *Session) init(config *configv3.Config, configUaa *configv3.Config, conf SkipSSLValidation: config.SkipSSLValidation(), DialTimeout: config.DialTimeout(), }) - info, _, err := ccClientV3.GetInfo() + root, _, err := ccClientV3.GetRoot() if err != nil { - return fmt.Errorf("could not fetch api root informations: %s", err) + return fmt.Errorf("could not fetch api root information: %s", err) } // create an uaa client with cf_username/cf_password or client_id/client secret @@ -150,7 +150,7 @@ func (s *Session) init(config *configv3.Config, configUaa *configv3.Config, conf uaaAuthWrapper := uaaWrapper.NewUAAAuthentication(nil, configUaa) uaaClient.WrapConnection(uaaAuthWrapper) uaaClient.WrapConnection(uaaWrapper.NewRetryRequest(config.RequestRetryCount())) - err = uaaClient.SetupResources(info.UAA(), info.Login()) + err = uaaClient.SetupResources(root.UAA(), root.Login()) if err != nil { return fmt.Errorf("error setup resource uaa: %s", err) } @@ -200,7 +200,7 @@ func (s *Session) init(config *configv3.Config, configUaa *configv3.Config, conf uaaAuthWrapperSess := uaaWrapper.NewUAAAuthentication(nil, configUaa) uaaClientSess.WrapConnection(uaaAuthWrapperSess) uaaClientSess.WrapConnection(uaaWrapper.NewRetryRequest(config.RequestRetryCount())) - err = uaaClientSess.SetupResources(info.UAA(), info.Login()) + err = uaaClientSess.SetupResources(root.UAA(), root.Login()) if err != nil { return fmt.Errorf("error setup 
resource uaa: %s", err) } @@ -282,7 +282,7 @@ func (s *Session) init(config *configv3.Config, configUaa *configv3.Config, conf DialTimeout: config.DialTimeout(), SkipSSLValidation: config.SkipSSLValidation(), }, - RoutingEndpoint: info.Routing(), + RoutingEndpoint: root.Routing(), } routerWrappers := []router.ConnectionWrapper{} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index 061d72ae..00000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - master - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 97c1b34f..00000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. 
It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. 
- -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d4..00000000 --- a/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 761520a8..00000000 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,605 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "regexp" - "time" - - "github.com/google/gofuzz/bytesource" - "strings" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int - skipFieldPatterns []*regexp.Regexp -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. -func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// NewFromGoFuzz is a helper function that enables using gofuzz (this -// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous -// fuzzing. Essentially, it enables translating the fuzzing bytes from -// go-fuzz to any Go object using this library. -// -// This implementation promises a constant translation from a given slice of -// bytes to the fuzzed objects. This promise will remain over future -// versions of Go and of this library. 
-// -// Note: the returned Fuzzer should not be shared between multiple goroutines, -// as its deterministic output will no longer be available. -// -// Example: use go-fuzz to test the function `MyFunc(int)` in the package -// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: -// -// // +build gofuzz -// package mypacakge -// import fuzz "github.com/google/gofuzz" -// func Fuzz(data []byte) int { -// var i int -// fuzz.NewFromGoFuzz(data).Fuzz(&i) -// MyFunc(i) -// return 0 -// } -func NewFromGoFuzz(data []byte) *Fuzzer { - return New().RandSource(bytesource.New(data)) -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. 
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. -func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() >= f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. This includes struct members, pointers, and map and slice -// elements. 
-func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Skip fields which match the supplied pattern. Call this multiple times if needed -// This is useful to skip XXX_ fields generated by protobuf -func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { - f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. 
-func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. -type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. - if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - 
} - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - skipField := false - fieldName := v.Type().Field(i).Name - for _, pattern := range fc.fuzzer.skipFieldPatterns { - if pattern.MatchString(fieldName) { - skipField = true - break - } - } - if !skipField { - fc.doFuzz(v.Field(i), 0) - } - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. - doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. 
-type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. 
- sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex(r.Float64(), r.Float64())) - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - return r.Int31()&(1<<30) == 0 -} - -type int63nPicker interface { - Int63n(int64) int64 -} - -// UnicodeRange describes a sequential range of unicode characters. -// Last must be numerically greater than First. -type UnicodeRange struct { - First, Last rune -} - -// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. -// To be useful, each range must have at least one character (First <= Last) and -// there must be at least one range. -type UnicodeRanges []UnicodeRange - -// choose returns a random unicode character from the given range, using the -// given randomness source. 
-func (ur UnicodeRange) choose(r int63nPicker) rune { - count := int64(ur.Last - ur.First + 1) - return ur.First + rune(r.Int63n(count)) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from the range ur. If there are no characters -// in the range (cr.Last < cr.First), this will panic. -func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { - ur.check() - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// check is a function that used to check whether the first of ur(UnicodeRange) -// is greater than the last one. -func (ur UnicodeRange) check() { - if ur.Last < ur.First { - panic("The last encoding must be greater than the first one.") - } -} - -// randString of UnicodeRange makes a random string up to 20 characters long. -// Each character is selected form ur(UnicodeRange). -func (ur UnicodeRange) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur.choose(r)) - } - return sb.String() -} - -// defaultUnicodeRanges sets a default unicode range when user do not set -// CustomStringFuzzFunc() but wants fuzz string. -var defaultUnicodeRanges = UnicodeRanges{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from one of the ranges of ur(UnicodeRanges). -// Each range has an equal probability of being chosen. If there are no ranges, -// or a selected range has no characters (.Last < .First), this will panic. -// Do not modify any of the ranges in ur after calling this function. -func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { - // Check unicode ranges slice is empty. 
- if len(ur) == 0 { - panic("UnicodeRanges is empty.") - } - // if not empty, each range should be checked. - for i := range ur { - ur[i].check() - } - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// randString of UnicodeRanges makes a random string up to 20 characters long. -// Each character is selected form one of the ranges of ur(UnicodeRanges), -// and each range has an equal probability of being chosen. -func (ur UnicodeRanges) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) - } - return sb.String() -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - return defaultUnicodeRanges.randString(r) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. 
-func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..7ec5ac7e --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. 
(Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 00000000..a502fdc5 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 00000000..b4bb97f6 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 00000000..5dc68268 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 00000000..3e9a6188 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,21 @@ +# uuid +The uuid package generates and inspects UUIDs based on +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +```sh +go get github.com/google/uuid +``` + +###### Documentation +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 00000000..fa820b9d --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. 
+func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 00000000..5b8a4b9a --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 00000000..dc60082d --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. 
+ Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 00000000..14bd3407 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 00000000..d651a2b0 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
+func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 00000000..b2a0bc87 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. 
+// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 00000000..0cbbcddb --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 00000000..d7fcbf28 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. 
+// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 00000000..2e02ec06 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. 
+func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 00000000..c3511292 --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,134 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. 
+func (uuid UUID) Time() Time { + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 00000000..5ea6c737 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 00000000..5232b486 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,365 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. 
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. 
+func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. 
+// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 00000000..46310962 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. 
If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 00000000..7697802e --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 00000000..339a959a --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. 
+// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 00000000..3167b643 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. 
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d5ed172a..4d576876 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[balancer.SubConn](), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -65,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[balancer.SubConn] scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
- addrsSet := resolver.NewAddressMap() + addrsSet := resolver.NewAddressMapV2[any]() for _, a := range s.ResolverState.Addresses { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { @@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } for _, a := range b.subConns.Keys() { - sci, _ := b.subConns.Get(a) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(a) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() { // Filter out all ready SCs from full subConn map. for _, addr := range b.subConns.Keys() { - sci, _ := b.subConns.Get(addr) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(addr) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 421c4fec..cc606f4d 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -73,7 +73,7 @@ func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilde esOpts: esOpts, childBuilder: childBuilder, } - es.children.Store(resolver.NewEndpointMap()) + es.children.Store(resolver.NewEndpointMap[*balancerWrapper]()) return es } @@ -90,7 +90,7 @@ type endpointSharding struct { // calls into a child. To avoid deadlocks, do not acquire childMu while // holding mu. 
childMu sync.Mutex - children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper + children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]] // inhibitChildUpdates is set during UpdateClientConnState/ResolverError // calls (calls to children will each produce an update, only want one @@ -122,7 +122,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState var ret error children := es.children.Load() - newChildren := resolver.NewEndpointMap() + newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. for _, endpoint := range state.ResolverState.Endpoints { @@ -131,9 +131,8 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState // update. continue } - var childBalancer *balancerWrapper - if val, ok := children.Get(endpoint); ok { - childBalancer = val.(*balancerWrapper) + childBalancer, ok := children.Get(endpoint) + if ok { // Endpoint attributes may have changed, update the stored endpoint. 
es.mu.Lock() childBalancer.childState.Endpoint = endpoint @@ -166,7 +165,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState for _, e := range children.Keys() { child, _ := children.Get(e) if _, ok := newChildren.Get(e); !ok { - child.(*balancerWrapper).closeLocked() + child.closeLocked() } } es.children.Store(newChildren) @@ -189,7 +188,7 @@ func (es *endpointSharding) ResolverError(err error) { }() children := es.children.Load() for _, child := range children.Values() { - child.(*balancerWrapper).resolverErrorLocked(err) + child.resolverErrorLocked(err) } } @@ -202,7 +201,7 @@ func (es *endpointSharding) Close() { defer es.childMu.Unlock() children := es.children.Load() for _, child := range children.Values() { - child.(*balancerWrapper).closeLocked() + child.closeLocked() } } @@ -222,8 +221,7 @@ func (es *endpointSharding) updateState() { childStates := make([]ChildState, 0, children.Len()) for _, child := range children.Values() { - bw := child.(*balancerWrapper) - childState := bw.childState + childState := child.childState childStates = append(childStates, childState) childPicker := childState.State.Picker switch childState.State.ConnectivityState { diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 113181e6..494314f2 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -122,7 +122,7 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) target: bo.Target.String(), metricsRecorder: cc.MetricsRecorder(), - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[*scData](), state: connectivity.Connecting, cancelConnectionTimer: func() {}, } @@ -220,7 +220,7 @@ type pickfirstBalancer struct { // updates. 
state connectivity.State // scData for active subonns mapped by address. - subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[*scData] addressList addressList firstPass bool numTF int @@ -319,7 +319,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState prevAddr := b.addressList.currentAddress() prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) // If the previous ready SubConn exists in new address list, @@ -381,21 +381,21 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.numTF = 0 // Reset the connection attempt record for existing SubConns. for _, sd := range b.subConns.Values() { - sd.(*scData).connectionFailedInFirstPass = false + sd.connectionFailedInFirstPass = false } b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { - sd.(*scData).subConn.Shutdown() + sd.subConn.Shutdown() } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() } // deDupAddresses ensures that each address appears only once in the slice. func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMap() + seenAddrs := resolver.NewAddressMapV2[*scData]() retAddrs := []resolver.Address{} for _, addr := range addrs { @@ -481,7 +481,7 @@ func addressFamily(address string) ipAddrFamily { // This ensures that the subchannel map accurately reflects the current set of // addresses received from the name resolver. 
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMap() + newAddrsMap := resolver.NewAddressMapV2[bool]() for _, addr := range newAddrs { newAddrsMap.Set(addr, true) } @@ -491,7 +491,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) continue } val, _ := b.subConns.Get(oldAddr) - val.(*scData).subConn.Shutdown() + val.subConn.Shutdown() b.subConns.Delete(oldAddr) } } @@ -500,13 +500,12 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { b.cancelConnectionTimer() - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if sd.subConn != selected.subConn { sd.subConn.Shutdown() } } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() b.subConns.Set(selected.addr, selected) } @@ -539,18 +538,17 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.subConns.Set(curAddr, sd) } - scd := sd.(*scData) - switch scd.rawConnectivityState { + switch sd.rawConnectivityState { case connectivity.Idle: - scd.subConn.Connect() + sd.subConn.Connect() b.scheduleNextConnectionLocked() return case connectivity.TransientFailure: // The SubConn is being re-used and failed during a previous pass // over the addressList. It has not completed backoff yet. // Mark it as having failed and try the next address. 
- scd.connectionFailedInFirstPass = true - lastErr = scd.lastErr + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr continue case connectivity.Connecting: // Wait for the connection attempt to complete or the timer to fire @@ -558,7 +556,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.scheduleNextConnectionLocked() return default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) return } @@ -753,8 +751,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } // Connect() has been called on all the SubConns. The first pass can be // ended if all the SubConns have reported a failure. - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if !sd.connectionFailedInFirstPass { return } @@ -765,8 +762,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } @@ -927,6 +923,5 @@ func (al *addressList) hasNext() bool { // fields that are meaningful to the SubConn. 
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) && - a.Metadata == b.Metadata + a.Attributes.Equal(b.Attributes) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index b2f8fc7f..825c3179 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.5 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index a319ef97..4f350ca5 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1231,8 +1231,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) // adjustParams updates parameters used to create transports upon // receiving a GoAway. func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: + if r == transport.GoAwayTooManyPings { v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() if v > ac.cc.keepaliveParams.Time { diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 94177b05..faa59e41 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.5 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -178,6 +178,87 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { return HealthCheckResponse_UNKNOWN } +type HealthListRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListRequest) Reset() { + *x = HealthListRequest{} + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListRequest) ProtoMessage() {} + +func (x *HealthListRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead. +func (*HealthListRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2} +} + +type HealthListResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // statuses contains all the services and their respective status. 
+ Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListResponse) Reset() { + *x = HealthListResponse{} + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListResponse) ProtoMessage() {} + +func (x *HealthListResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead. +func (*HealthListResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3} +} + +func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse { + if x != nil { + return x.Statuses + } + return nil +} + var File_grpc_health_v1_health_proto protoreflect.FileDescriptor var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ @@ -198,25 +279,44 @@ var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x03, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 
0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, + 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x0d, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xfd, 0x01, + 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 
0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x05, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x70, 0x0a, + 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x76, 0x31, 0xa2, + 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x56, 0x31, 0xaa, 0x02, + 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( @@ -232,23 +332,30 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { } var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse + (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest + (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse + nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry } var 
file_grpc_health_v1_health_proto_depIdxs = []int32{ 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus - 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest - 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest - 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse - 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry + 2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse + 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest + 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse + 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_grpc_health_v1_health_proto_init() } @@ -262,7 +369,7 @@ func file_grpc_health_v1_health_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, - NumMessages: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index f96b8ab4..93136610 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_List_FullMethodName = "/grpc.health.v1.Health/List" Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" ) @@ -55,9 +56,19 @@ type HealthClient interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. 
It will then subsequently send a new message whenever @@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } +func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthListResponse) + err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) @@ -128,9 +149,19 @@ type HealthServer interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(context.Context, *HealthListRequest) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. 
It will then subsequently send a new message whenever @@ -159,6 +190,9 @@ type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } +func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } @@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_List_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).List(ctx, req.(*HealthListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(HealthCheckRequest) if err := stream.RecvMsg(m); err != nil { @@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{ MethodName: "Check", Handler: _Health_Check_Handler, }, + { + MethodName: "List", + Handler: _Health_List_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 1e42b6fd..cc5713fd 100644 --- 
a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -51,10 +51,24 @@ var ( // xDS server in the list of server configs will be used. XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be enabled by + // instead of the exiting pickfirst implementation. This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "true". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) + // to "false". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) + + // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash + // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the + // implementation of A76 is stable, we will flip the default value to false + // in a subsequent release. A final release will remove this environment + // variable, enabling the new behavior unconditionally. + XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + + // RingHashSetRequestHashKey is set if the ring hash balancer can get the + // request hash header by setting the "requestHashHeader" field, according + // to gRFC A76. It can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". 
+ RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 13e1f386..2ce012cd 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -259,6 +259,13 @@ var ( // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for // testing purposes. SetBufferPoolingThresholdForTesting any // func(int) + + // TimeAfterFunc is used to create timers. During tests the function is + // replaced to track allocated timers and fail the test if a timer isn't + // cancelled. + TimeAfterFunc = func(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) + } ) // HealthChecker defines the signature of the client-side LB channel health @@ -300,3 +307,9 @@ type EnforceSubConnEmbedding interface { type EnforceClientConnEmbedding interface { enforceClientConnEmbedding() } + +// Timer is an interface to allow injecting different time.Timer implementations +// during tests. +type Timer interface { + Stop() bool +} diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 900bfb71..c4055bc0 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool { return false } -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : -// -// - key must contain one or more characters. -// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in the every value must be printable (in [%x20-%x7E]). 
-func ValidatePair(key string, vals ...string) error { +// ValidateKey validates a key with the following rules (pseudo-headers are +// skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +func ValidateKey(key string) error { // key should not be empty if key == "" { return fmt.Errorf("there is an empty key in the header") @@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error { return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) } } + return nil +} + +// ValidatePair validates a key-value pair with the following rules +// (pseudo-header are skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding +// value is performed. +// - the characters in every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + if err := ValidateKey(key); err != nil { + return err + } if strings.HasSuffix(key, "-bin") { return nil } diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index a6c64701..7b93f692 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -44,15 +44,19 @@ var ( // // It implements the [resolver.Resolver] interface. 
type delegatingResolver struct { - target resolver.Target // parsed target URI to be resolved - cc resolver.ClientConn // gRPC ClientConn - targetResolver resolver.Resolver // resolver for the target URI, based on its scheme - proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured - proxyURL *url.URL // proxy URL, derived from proxy environment and target + target resolver.Target // parsed target URI to be resolved + cc resolver.ClientConn // gRPC ClientConn + proxyURL *url.URL // proxy URL, derived from proxy environment and target mu sync.Mutex // protects all the fields below targetResolverState *resolver.State // state of the target resolver proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured + + // childMu serializes calls into child resolvers. It also protects access to + // the following fields. + childMu sync.Mutex + targetResolver resolver.Resolver // resolver for the target URI, based on its scheme + proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured } // nopResolver is a resolver that does nothing. @@ -111,6 +115,10 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti logger.Infof("Proxy URL detected : %s", r.proxyURL) } + // Resolver updates from one child may trigger calls into the other. Block + // updates until the children are initialized. + r.childMu.Lock() + defer r.childMu.Unlock() // When the scheme is 'dns' and target resolution on client is not enabled, // resolution should be handled by the proxy, not the client. Therefore, we // bypass the target resolver and store the unresolved target address. 
@@ -165,11 +173,15 @@ func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resol } func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.childMu.Lock() + defer r.childMu.Unlock() r.targetResolver.ResolveNow(o) r.proxyResolver.ResolveNow(o) } func (r *delegatingResolver) Close() { + r.childMu.Lock() + defer r.childMu.Unlock() r.targetResolver.Close() r.targetResolver = nil @@ -267,11 +279,17 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro err := r.updateClientConnStateLocked() // Another possible approach was to block until updates are received from // both resolvers. But this is not used because calling `New()` triggers - // `Build()` for the first resolver, which calls `UpdateState()`. And the + // `Build()` for the first resolver, which calls `UpdateState()`. And the // second resolver hasn't sent an update yet, so it would cause `New()` to // block indefinitely. if err != nil { - r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.targetResolver != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() } return err } @@ -291,7 +309,13 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err r.targetResolverState = &state err := r.updateClientConnStateLocked() if err != nil { - r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.proxyResolver != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index 8ed347c5..ccc0e017 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -59,7 +59,7 @@ func (s *ClientStream) 
Read(n int) (mem.BufferSlice, error) { return b, err } -// Close closes the stream and popagates err to any readers. +// Close closes the stream and propagates err to any readers. func (s *ClientStream) Close(err error) { var ( rst bool diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 513dbb93..ae931666 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -1390,8 +1390,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { if string(f.DebugData()) == "too_many_pings" { t.goAwayReason = GoAwayTooManyPings } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 997b0a59..7e53eb17 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" @@ -598,6 +599,22 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade if len(t.activeStreams) == 1 { t.idle = time.Time{} } + // Start a timer to close the stream on reaching the deadline. + if timeoutSet { + // We need to wait for s.cancel to be updated before calling + // t.closeStream to avoid data races. 
+ cancelUpdated := make(chan struct{}) + timer := internal.TimeAfterFunc(timeout, func() { + <-cancelUpdated + t.closeStream(s, true, http2.ErrCodeCancel, false) + }) + oldCancel := s.cancel + s.cancel = func() { + oldCancel() + timer.Stop() + } + close(cancelUpdated) + } t.mu.Unlock() if channelz.IsOn() { t.channelz.SocketMetrics.StreamsStarted.Add(1) @@ -1274,7 +1291,6 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { - t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { delete(t.activeStreams, s.id) @@ -1324,7 +1340,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() - s.swapState(streamDone) + oldState := s.swapState(streamDone) + if oldState == streamDone { + return + } t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index a22a9015..cf8da0b5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -35,8 +35,10 @@ type ServerStream struct { *Stream // Embed for common stream functionality. st internalServerTransport - ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) - cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + // cancel is invoked at the end of stream to cancel ctx. It also stops the + // timer for monitoring the rpc deadline if configured. + cancel func() // Holds compressor names passed in grpc-accept-encoding metadata from the // client. 
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb..c3c15ac9 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -18,16 +18,28 @@ package resolver -type addressMapEntry struct { +import ( + "encoding/base64" + "sort" + "strings" +) + +type addressMapEntry[T any] struct { addr Address - value any + value T } -// AddressMap is a map of addresses to arbitrary values taking into account +// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming +// release of grpc-go. +// +// Deprecated: use the generic AddressMapV2 type instead. +type AddressMap = AddressMapV2[any] + +// AddressMapV2 is a map of addresses to arbitrary values taking into account // Attributes. BalancerAttributes are ignored, as are Metadata and Type. // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. -type AddressMap struct { +type AddressMapV2[T any] struct { // The underlying map is keyed by an Address with fields that we don't care // about being set to their zero values. The only fields that we care about // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to @@ -41,23 +53,30 @@ type AddressMap struct { // The value type of the map contains a slice of addresses which match the key // in their `Addr` and `ServerName` fields and contain the corresponding value // associated with them. - m map[Address]addressMapEntryList + m map[Address]addressMapEntryList[T] } func toMapKey(addr *Address) Address { return Address{Addr: addr.Addr, ServerName: addr.ServerName} } -type addressMapEntryList []*addressMapEntry +type addressMapEntryList[T any] []*addressMapEntry[T] -// NewAddressMap creates a new AddressMap. +// NewAddressMap creates a new AddressMapV2[any]. +// +// Deprecated: use the generic NewAddressMapV2 constructor instead. 
func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[Address]addressMapEntryList)} + return NewAddressMapV2[any]() +} + +// NewAddressMapV2 creates a new AddressMapV2. +func NewAddressMapV2[T any]() *AddressMapV2[T] { + return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. -func (l addressMapEntryList) find(addr Address) int { +func (l addressMapEntryList[T]) find(addr Address) int { for i, entry := range l { // Attributes are the only thing to match on here, since `Addr` and // `ServerName` are already equal. @@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { +func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { +func (a *AddressMapV2[T]) Set(addr Address, value T) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { entryList[entry].value = value return } - a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value}) } // Delete removes addr from the map. -func (a *AddressMap) Delete(addr Address) { +func (a *AddressMapV2[T]) Delete(addr Address) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] entry := entryList.find(addr) @@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) { } // Len returns the number of entries in the map. 
-func (a *AddressMap) Len() int { +func (a *AddressMapV2[T]) Len() int { ret := 0 for _, entryList := range a.m { ret += len(entryList) @@ -116,7 +135,7 @@ func (a *AddressMap) Len() int { } // Keys returns a slice of all current map keys. -func (a *AddressMap) Keys() []Address { +func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { @@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) +func (a *AddressMapV2[T]) Values() []T { + ret := make([]T, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any { return ret } -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} +type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the // unordered set of address strings within an endpoint. This map is not thread // safe, thus it is unsafe to access concurrently. Must be created via // NewEndpointMap; do not construct directly. 
-type EndpointMap struct { - endpoints map[*endpointNode]any +type EndpointMap[T any] struct { + endpoints map[endpointMapKey]endpointData[T] +} + +type endpointData[T any] struct { + // decodedKey stores the original key to avoid decoding when iterating on + // EndpointMap keys. + decodedKey Endpoint + value T } // NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), +func NewEndpointMap[T any]() *EndpointMap[T] { + return &EndpointMap[T]{ + endpoints: make(map[endpointMapKey]endpointData[T]), } } +// encodeEndpoint returns a string that uniquely identifies the unordered set of +// addresses within an endpoint. +func encodeEndpoint(e Endpoint) endpointMapKey { + addrs := make([]string, 0, len(e.Addresses)) + // base64 encoding the address strings restricts the characters present + // within the strings. This allows us to use a delimiter without the need of + // escape characters. + for _, addr := range e.Addresses { + addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr))) + } + sort.Strings(addrs) + // " " should not appear in base64 encoded strings. + return endpointMapKey(strings.Join(addrs, " ")) +} + // Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true +func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) { + val, found := em.endpoints[encodeEndpoint(e)] + if found { + return val.value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. 
-func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return +func (em *EndpointMap[T]) Set(e Endpoint, value T) { + en := encodeEndpoint(e) + em.endpoints[en] = endpointData[T]{ + decodedKey: Endpoint{Addresses: e.Addresses}, + value: value, } - em.endpoints[&en] = value } // Len returns the number of entries in the map. -func (em *EndpointMap) Len() int { +func (em *EndpointMap[T]) Len() int { return len(em.endpoints) } @@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { +func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) + for _, en := range em.endpoints { + ret = append(ret, en.decodedKey) } return ret } // Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) +func (em *EndpointMap[T]) Values() []T { + ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { - ret = append(ret, val) + ret = append(ret, val.value) } return ret } -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - // Delete removes the specified endpoint from the map. 
-func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } +func (em *EndpointMap[T]) Delete(e Endpoint) { + en := encodeEndpoint(e) + delete(em.endpoints, en) } diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 945e24ff..80e16a32 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -134,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } + s.Endpoints = addressesToEndpoints(s.Addresses) } ccr.addChannelzTraceEvent(s) ccr.curState = s @@ -172,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.cc.mu.Unlock() return } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + s := resolver.State{ + Addresses: addrs, + ServiceConfig: ccr.curState.ServiceConfig, + Endpoints: addressesToEndpoints(addrs), + } ccr.addChannelzTraceEvent(s) ccr.curState = s ccr.mu.Unlock() @@ -210,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } + +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint { + endpoints := make([]resolver.Endpoint, 0, len(addrs)) + for _, a := range addrs { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + endpoints = append(endpoints, ep) + } + return endpoints +} diff --git 
a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index a8ddb0af..ad20e9df 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -870,13 +870,19 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) } - out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool) + // Read at most one byte more than the limit from the decompressor. + // Unless the limit is MaxInt64, in which case, that's impossible, so + // apply no limit. + if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 { + dcReader = io.LimitReader(dcReader, limit+1) + } + out, err := mem.ReadAll(dcReader, pool) if err != nil { out.Free() return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) } - if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) { + if out.Len() > maxReceiveMessageSize { out.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) } @@ -885,12 +891,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") } -// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF. 
-func atEOF(dcReader io.Reader) bool { - n, err := dcReader.Read(make([]byte, 1)) - return n == 0 && err == io.EOF -} - type recvCompressor interface { RecvCompress() string } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 6f20d2d5..baf7740e 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,7 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC attempt begins. +// Begin contains stats for the start of an RPC attempt. +// +// - Server-side: Triggered after `InHeader`, as headers are processed +// before the RPC lifecycle begins. +// - Client-side: The first stats event recorded. +// // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. @@ -69,7 +74,7 @@ func (*PickerUpdated) IsClient() bool { return true } func (*PickerUpdated) isRPCStats() {} -// InPayload contains the information for an incoming payload. +// InPayload contains stats about an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool @@ -98,7 +103,9 @@ func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} -// InHeader contains stats when a header is received. +// InHeader contains stats about header reception. +// +// - Server-side: The first stats event after the RPC request is received. type InHeader struct { // Client is true if this InHeader is from client side. Client bool @@ -123,7 +130,7 @@ func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} -// InTrailer contains stats when a trailer is received. +// InTrailer contains stats about trailer reception. type InTrailer struct { // Client is true if this InTrailer is from client side. 
Client bool @@ -139,7 +146,7 @@ func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} -// OutPayload contains the information for an outgoing payload. +// OutPayload contains stats about an outgoing payload. type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool @@ -166,7 +173,10 @@ func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} -// OutHeader contains stats when a header is sent. +// OutHeader contains stats about header transmission. +// +// - Client-side: Only occurs after 'Begin', as headers are always the first +// thing sent on a stream. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool @@ -189,14 +199,15 @@ func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} -// OutTrailer contains stats when a trailer is sent. +// OutTrailer contains stats about trailer transmission. type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. // - // Deprecated: This field is never set. The length is not known when this message is - // emitted because the trailer fields are compressed with hpack after that. + // Deprecated: This field is never set. The length is not known when this + // message is emitted because the trailer fields are compressed with hpack + // after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. @@ -208,7 +219,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} -// End contains stats when an RPC ends. +// End contains stats about RPC completion. type End struct { // Client is true if this End is from client side. 
Client bool @@ -238,7 +249,7 @@ type ConnStats interface { IsClient() bool } -// ConnBegin contains the stats of a connection when it is established. +// ConnBegin contains stats about connection establishment. type ConnBegin struct { // Client is true if this ConnBegin is from client side. Client bool @@ -249,7 +260,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client } func (s *ConnBegin) isConnStats() {} -// ConnEnd contains the stats of a connection when it ends. +// ConnEnd contains stats about connection termination. type ConnEnd struct { // Client is true if this ConnEnd is from client side. Client bool diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 783c41f7..90237b1d 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.71.0" +const Version = "1.72.0" diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go index 167baf68..58751ed0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package errors provides detailed error types for api field validation. -package errors // import "k8s.io/apimachinery/pkg/api/errors" +package errors diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go index b6d42acf..a3b18a5c 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// Package meta provides functions for retrieving API metadata from objects // belonging to the Kubernetes API -package meta // import "k8s.io/apimachinery/pkg/api/meta" +package meta diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 1fdd32c4..468afd0e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -221,6 +221,9 @@ func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) { if err != nil { return nil, err } + if items.IsNil() { + return nil, nil + } list := make([]runtime.Object, items.Len()) if len(list) == 0 { return list, nil diff --git a/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go new file mode 100644 index 00000000..9f5ae7a9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operation + +import "k8s.io/apimachinery/pkg/util/sets" + +// Operation provides contextual information about a validation request and the API +// operation being validated. +// This type is intended for use with generate validation code and may be enhanced +// in the future to include other information needed to validate requests. +type Operation struct { + // Type is the category of operation being validated. 
This does not + // differentiate between HTTP verbs like PUT and PATCH, but rather merges + // those into a single "Update" category. + Type Type + + // Options declare the options enabled for validation. + // + // Options should be set according to a resource validation strategy before validation + // is performed, and must be treated as read-only during validation. + // + // Options are identified by string names. Option string names may match the name of a feature + // gate, in which case the presence of the name in the set indicates that the feature is + // considered enabled for the resource being validated. Note that a resource may have a + // feature enabled even when the feature gate is disabled. This can happen when feature is + // already in-use by a resource, often because the feature gate was enabled when the + // resource first began using the feature. + // + // Unset options are disabled/false. + Options sets.Set[string] +} + +// Code is the request operation to be validated. +type Type uint32 + +const ( + // Create indicates the request being validated is for a resource create operation. + Create Type = iota + + // Update indicates the request being validated is for a resource update operation. + Update +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 7736753d..617b9a5d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -21,4 +21,4 @@ limitations under the License. 
// +groupName=meta.k8s.io -package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" +package v1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go index 3cf9d48e..a5f437b4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -20,21 +20,22 @@ limitations under the License. package v1 import ( + "math/rand" "time" - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" ) -// Fuzz satisfies fuzz.Interface. -func (t *MicroTime) Fuzz(c fuzz.Continue) { +// Fuzz satisfies randfill.SimpleSelfFiller. +func (t *MicroTime) RandFill(r *rand.Rand) { if t == nil { return } // Allow for about 1000 years of randomness. Accurate to a tenth of // micro second. Leave off nanoseconds because JSON doesn't // represent them so they can't round-trip properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) + t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 1000*r.Int63n(1000000)) } -// ensure MicroTime implements fuzz.Interface -var _ fuzz.Interface = &MicroTime{} +// ensure MicroTime implements randfill.Interface +var _ randfill.SimpleSelfFiller = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go index bf9e21b5..48fb9784 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -20,21 +20,22 @@ limitations under the License. package v1 import ( + "math/rand" "time" - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" ) -// Fuzz satisfies fuzz.Interface. -func (t *Time) Fuzz(c fuzz.Continue) { +// Fuzz satisfies randfill.SimpleSelfFiller. +func (t *Time) RandFill(r *rand.Rand) { if t == nil { return } // Allow for about 1000 years of randomness. 
Leave off nanoseconds // because JSON doesn't represent them so they can't round-trip // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) + t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 0) } -// ensure Time implements fuzz.Interface -var _ fuzz.Interface = &Time{} +// ensure Time implements randfill.SimpleSelfFiller +var _ randfill.SimpleSelfFiller = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 71f7b163..59f43b7b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -188,7 +188,7 @@ func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, b // NestedStringMap returns a copy of map[string]string value of a nested field. // Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map. func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { - m, found, err := nestedMapNoCopy(obj, fields...) + m, found, err := nestedMapNoCopy(obj, false, fields...) if !found || err != nil { return nil, found, err } @@ -203,10 +203,32 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s return strMap, true, nil } +// NestedNullCoercingStringMap returns a copy of map[string]string value of a nested field. +// Returns `nil, true, nil` if the value exists and is explicitly null. +// Returns `nil, false, err` if the value is not a map or a null value, or is a map and contains non-string non-null values. +// Null values in the map are coerced to "" to match json decoding behavior. +func NestedNullCoercingStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { + m, found, err := nestedMapNoCopy(obj, true, fields...) 
+ if !found || err != nil || m == nil { + return nil, found, err + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else if v == nil { + strMap[k] = "" + } else { + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) + } + } + return strMap, true, nil +} + // NestedMap returns a deep copy of map[string]interface{} value of a nested field. // Returns false if value is not found and an error if not a map[string]interface{}. func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { - m, found, err := nestedMapNoCopy(obj, fields...) + m, found, err := nestedMapNoCopy(obj, false, fields...) if !found || err != nil { return nil, found, err } @@ -215,11 +237,14 @@ func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interfa // nestedMapNoCopy returns a map[string]interface{} value of a nested field. // Returns false if value is not found and an error if not a map[string]interface{}. -func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { +func nestedMapNoCopy(obj map[string]interface{}, tolerateNil bool, fields ...string) (map[string]interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) 
if !found || err != nil { return nil, found, err } + if val == nil && tolerateNil { + return nil, true, nil + } m, ok := val.(map[string]interface{}) if !ok { return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 5e36a91e..fdb0c862 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -397,7 +397,7 @@ func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds } func (u *Unstructured) GetLabels() map[string]string { - m, _, _ := NestedStringMap(u.Object, "metadata", "labels") + m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "labels") return m } @@ -410,7 +410,7 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) GetAnnotations() map[string]string { - m, _, _ := NestedStringMap(u.Object, "metadata", "annotations") + m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "annotations") return m } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go index 7415d816..0c46ef2d 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go @@ -21,4 +21,4 @@ limitations under the License. // but for the fields which did not change, copying is automated. This makes it // easy to modify the structures you use in memory without affecting the format // you store on disk or respond to in your external API calls. 
-package conversion // import "k8s.io/apimachinery/pkg/conversion" +package conversion diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go index 7b763de6..4c1002a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package queryparams provides conversion from versioned // runtime objects to URL query values -package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams" +package queryparams diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go index c39b8039..49059e26 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package fields implements a simple field system, parsing and matching // selectors with sets of fields. -package fields // import "k8s.io/apimachinery/pkg/fields" +package fields diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go index 82de0051..35ba7880 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package labels implements a simple label system, parsing and matching // selectors with sets of labels. -package labels // import "k8s.io/apimachinery/pkg/labels" +package labels diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go index 89feb401..b54429bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go @@ -48,4 +48,4 @@ limitations under the License. // // As a bonus, a few common types useful from all api objects and versions // are provided in types.go. 
-package runtime // import "k8s.io/apimachinery/pkg/runtime" +package runtime diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 2703300c..202bf4f0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -259,6 +259,7 @@ type ObjectDefaulter interface { type ObjectVersioner interface { ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + PrioritizedVersionsForGroup(group string) []schema.GroupVersion } // ObjectConvertor converts an object to a different version. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index a5b11671..fde87f1a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -17,15 +17,18 @@ limitations under the License. package runtime import ( + "context" "fmt" "reflect" "strings" + "k8s.io/apimachinery/pkg/api/operation" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/naming" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" ) // Scheme defines methods for serializing and deserializing API objects, a type @@ -68,6 +71,12 @@ type Scheme struct { // the provided object must be a pointer. defaulterFuncs map[reflect.Type]func(interface{}) + // validationFuncs is a map to funcs to be called with an object to perform validation. + // The provided object must be a pointer. + // If oldObject is non-nil, update validation is performed and may perform additional + // validation such as transition rules and immutability checks. 
+ validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList + // converter stores all registered conversion functions. It also has // default converting behavior. converter *conversion.Converter @@ -96,6 +105,7 @@ func NewScheme() *Scheme { unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, + validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresource ...string) field.ErrorList{}, versionPriority: map[string][]string{}, schemeName: naming.GetNameFromCallsite(internalPackages...), } @@ -347,6 +357,35 @@ func (s *Scheme) Default(src Object) { } } +// AddValidationFunc registered a function that can validate the object, and +// oldObject. These functions will be invoked when Validate() or ValidateUpdate() +// is called. The function will never be called unless the validated object +// matches srcType. If this function is invoked twice with the same srcType, the +// fn passed to the later call will be used instead. +func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList) { + s.validationFuncs[reflect.TypeOf(srcType)] = fn +} + +// Validate validates the provided Object according to the generated declarative validation code. +// WARNING: This does not validate all objects! The handwritten validation code in validation.go +// is not run when this is called. Only the generated zz_generated.validations.go validation code is run. 
+func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object Object, subresources ...string) field.ErrorList { + if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok { + return fn(ctx, operation.Operation{Type: operation.Create, Options: options}, object, nil, subresources...) + } + return nil +} + +// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code. +// WARNING: This does not validate all objects! The handwritten validation code in validation.go +// is not run when this is called. Only the generated zz_generated.validations.go validation code is run. +func (s *Scheme) ValidateUpdate(ctx context.Context, options sets.Set[string], object, oldObject Object, subresources ...string) field.ErrorList { + if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok { + return fn(ctx, operation.Operation{Type: operation.Update, Options: options}, object, oldObject, subresources...) + } + return nil +} + // Convert will attempt to convert in into out. Both must be pointers. For easy // testing of conversion functions. Returns an error if the conversion isn't // possible. 
You can call this with types that haven't been registered (for example, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go index 858529e9..e550ea34 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -140,7 +140,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec var wg sync.WaitGroup wg.Add(1) defer wg.Done() - c = checker{ + placeholder := checker{ safe: func() bool { wg.Wait() return c.safe() @@ -150,7 +150,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec return c.check(rv, depth) }, } - if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + if actual, loaded := cache.m.LoadOrStore(rt, &placeholder); loaded { // Someone else stored an entry for this type, use it. 
return *actual.(*checker) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 77bb3074..81286fcc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -28,7 +28,7 @@ import ( func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo { jsonSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON}, ) jsonSerializerType := runtime.SerializerInfo{ MediaType: runtime.ContentTypeJSON, @@ -38,7 +38,7 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option Serializer: jsonSerializer, StrictSerializer: json.NewSerializerWithOptions( mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: true, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON}, ), StreamSerializer: &runtime.StreamSerializerInfo{ EncodesAsText: true, @@ -61,7 +61,9 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option mf, scheme, scheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: true}, ) - protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoSerializer := protobuf.NewSerializerWithOptions(scheme, scheme, protobuf.SerializerOptions{ + StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToProtobuf, + }) protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) serializers := []runtime.SerializerInfo{ @@ -113,6 +115,9 @@ type CodecFactoryOptions struct { // Pretty includes a pretty 
serializer along with the non-pretty one Pretty bool + StreamingCollectionsEncodingToJSON bool + StreamingCollectionsEncodingToProtobuf bool + serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo } @@ -147,6 +152,18 @@ func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.S } } +func WithStreamingCollectionEncodingToJSON() CodecFactoryOptionsMutator { + return func(options *CodecFactoryOptions) { + options.StreamingCollectionsEncodingToJSON = true + } +} + +func WithStreamingCollectionEncodingToProtobuf() CodecFactoryOptionsMutator { + return func(options *CodecFactoryOptions) { + options.StreamingCollectionsEncodingToProtobuf = true + } +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go new file mode 100644 index 00000000..075163dd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go @@ -0,0 +1,230 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json + +import ( + "encoding/json" + "fmt" + "io" + "maps" + "slices" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/conversion" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +func streamEncodeCollections(obj runtime.Object, w io.Writer) (bool, error) { + list, ok := obj.(*unstructured.UnstructuredList) + if ok { + return true, streamingEncodeUnstructuredList(w, list) + } + if _, ok := obj.(json.Marshaler); ok { + return false, nil + } + typeMeta, listMeta, items, err := getListMeta(obj) + if err == nil { + return true, streamingEncodeList(w, typeMeta, listMeta, items) + } + return false, nil +} + +// getListMeta implements list extraction logic for json stream serialization. +// +// Reason for a custom logic instead of reusing accessors from meta package: +// * Validate json tags to prevent incompatibility with json standard package. +// * ListMetaAccessor doesn't distinguish empty from nil value. 
+// * TypeAccessort reparsing "apiVersion" and serializing it with "{group}/{version}" +func getListMeta(list runtime.Object) (metav1.TypeMeta, metav1.ListMeta, []runtime.Object, error) { + listValue, err := conversion.EnforcePtr(list) + if err != nil { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err + } + listType := listValue.Type() + if listType.NumField() != 3 { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListType to have 3 fields") + } + // TypeMeta + typeMeta, ok := listValue.Field(0).Interface().(metav1.TypeMeta) + if !ok { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected TypeMeta field to have TypeMeta type") + } + if listType.Field(0).Tag.Get("json") != ",inline" { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected TypeMeta json field tag to be ",inline"`) + } + // ListMeta + listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta) + if !ok { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListMeta field to have ListMeta type") + } + if listType.Field(1).Tag.Get("json") != "metadata,omitempty" { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected ListMeta json field tag to be "metadata,omitempty"`) + } + // Items + items, err := meta.ExtractList(list) + if err != nil { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err + } + if listType.Field(2).Tag.Get("json") != "items" { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected Items json field tag to be "items"`) + } + return typeMeta, listMeta, items, nil +} + +func streamingEncodeList(w io.Writer, typeMeta metav1.TypeMeta, listMeta metav1.ListMeta, items []runtime.Object) error { + // Start + if _, err := w.Write([]byte(`{`)); err != nil { + return err + } + + // TypeMeta + if typeMeta.Kind != "" { + if err := encodeKeyValuePair(w, "kind", typeMeta.Kind, []byte(",")); err != nil { + return err + } + } + if typeMeta.APIVersion != "" { + if err := 
encodeKeyValuePair(w, "apiVersion", typeMeta.APIVersion, []byte(",")); err != nil { + return err + } + } + + // ListMeta + if err := encodeKeyValuePair(w, "metadata", listMeta, []byte(",")); err != nil { + return err + } + + // Items + if err := encodeItemsObjectSlice(w, items); err != nil { + return err + } + + // End + _, err := w.Write([]byte("}\n")) + return err +} + +func encodeItemsObjectSlice(w io.Writer, items []runtime.Object) (err error) { + if items == nil { + err := encodeKeyValuePair(w, "items", nil, nil) + return err + } + _, err = w.Write([]byte(`"items":[`)) + if err != nil { + return err + } + suffix := []byte(",") + for i, item := range items { + if i == len(items)-1 { + suffix = nil + } + err := encodeValue(w, item, suffix) + if err != nil { + return err + } + } + _, err = w.Write([]byte("]")) + if err != nil { + return err + } + return err +} + +func streamingEncodeUnstructuredList(w io.Writer, list *unstructured.UnstructuredList) error { + _, err := w.Write([]byte(`{`)) + if err != nil { + return err + } + keys := slices.Collect(maps.Keys(list.Object)) + if _, exists := list.Object["items"]; !exists { + keys = append(keys, "items") + } + sort.Strings(keys) + + suffix := []byte(",") + for i, key := range keys { + if i == len(keys)-1 { + suffix = nil + } + if key == "items" { + err = encodeItemsUnstructuredSlice(w, list.Items, suffix) + } else { + err = encodeKeyValuePair(w, key, list.Object[key], suffix) + } + if err != nil { + return err + } + } + _, err = w.Write([]byte("}\n")) + return err +} + +func encodeItemsUnstructuredSlice(w io.Writer, items []unstructured.Unstructured, suffix []byte) (err error) { + _, err = w.Write([]byte(`"items":[`)) + if err != nil { + return err + } + comma := []byte(",") + for i, item := range items { + if i == len(items)-1 { + comma = nil + } + err := encodeValue(w, item.Object, comma) + if err != nil { + return err + } + } + _, err = w.Write([]byte("]")) + if err != nil { + return err + } + if len(suffix) > 0 { 
+ _, err = w.Write(suffix) + } + return err +} + +func encodeKeyValuePair(w io.Writer, key string, value any, suffix []byte) (err error) { + err = encodeValue(w, key, []byte(":")) + if err != nil { + return err + } + err = encodeValue(w, value, suffix) + if err != nil { + return err + } + return err +} + +func encodeValue(w io.Writer, value any, suffix []byte) error { + data, err := json.Marshal(value) + if err != nil { + return err + } + _, err = w.Write(data) + if err != nil { + return err + } + if len(suffix) > 0 { + _, err = w.Write(suffix) + } + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 1ae4a32e..24f66a10 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -36,7 +36,7 @@ import ( // is not nil, the object has the group, version, and kind fields set. // Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false, false}) } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer @@ -44,7 +44,7 @@ func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtim // matches JSON, and will error if constructs are used that do not serialize to JSON. // Deprecated: use NewSerializerWithOptions instead. 
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false, false}) } // NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML @@ -93,6 +93,9 @@ type SerializerOptions struct { // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. Strict bool + + // StreamingCollectionsEncoding enables encoding collection, one item at the time, drastically reducing memory needed. + StreamingCollectionsEncoding bool } // Serializer handles encoding versioned objects into the proper JSON form @@ -242,6 +245,15 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { _, err = w.Write(data) return err } + if s.options.StreamingCollectionsEncoding { + ok, err := streamEncodeCollections(obj, w) + if err != nil { + return err + } + if ok { + return nil + } + } encoder := json.NewEncoder(w) return encoder.Encode(obj) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go new file mode 100644 index 00000000..754a8082 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go @@ -0,0 +1,174 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "errors" + "io" + "math/bits" + + "github.com/gogo/protobuf/proto" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + errFieldCount = errors.New("expected ListType to have 3 fields") + errTypeMetaField = errors.New("expected TypeMeta field to have TypeMeta type") + errTypeMetaProtobufTag = errors.New(`expected TypeMeta protobuf field tag to be ""`) + errListMetaField = errors.New("expected ListMeta field to have ListMeta type") + errListMetaProtobufTag = errors.New(`expected ListMeta protobuf field tag to be "bytes,1,opt,name=metadata"`) + errItemsProtobufTag = errors.New(`expected Items protobuf field tag to be "bytes,2,rep,name=items"`) + errItemsSizer = errors.New(`expected Items elements to implement proto.Sizer`) +) + +// getStreamingListData implements list extraction logic for protobuf stream serialization. +// +// Reason for a custom logic instead of reusing accessors from meta package: +// * Validate proto tags to prevent incompatibility with proto standard package. +// * ListMetaAccessor doesn't distinguish empty from nil value. 
+// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}" +func getStreamingListData(list runtime.Object) (data streamingListData, err error) { + listValue, err := conversion.EnforcePtr(list) + if err != nil { + return data, err + } + listType := listValue.Type() + if listType.NumField() != 3 { + return data, errFieldCount + } + // TypeMeta: validated, but not returned as is not serialized. + _, ok := listValue.Field(0).Interface().(metav1.TypeMeta) + if !ok { + return data, errTypeMetaField + } + if listType.Field(0).Tag.Get("protobuf") != "" { + return data, errTypeMetaProtobufTag + } + // ListMeta + listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta) + if !ok { + return data, errListMetaField + } + // if we were ever to relax the protobuf tag check we should update the hardcoded `0xa` below when writing ListMeta. + if listType.Field(1).Tag.Get("protobuf") != "bytes,1,opt,name=metadata" { + return data, errListMetaProtobufTag + } + data.listMeta = listMeta + // Items; if we were ever to relax the protobuf tag check we should update the hardcoded `0x12` below when writing Items. 
+ if listType.Field(2).Tag.Get("protobuf") != "bytes,2,rep,name=items" { + return data, errItemsProtobufTag + } + items, err := meta.ExtractList(list) + if err != nil { + return data, err + } + data.items = items + data.totalSize, data.listMetaSize, data.itemsSizes, err = listSize(listMeta, items) + return data, err +} + +type streamingListData struct { + // totalSize is the total size of the serialized List object, including their proto headers/size bytes + totalSize int + + // listMetaSize caches results from .Size() call to listMeta, doesn't include header bytes (field identifier, size) + listMetaSize int + listMeta metav1.ListMeta + + // itemsSizes caches results from .Size() call to items, doesn't include header bytes (field identifier, size) + itemsSizes []int + items []runtime.Object +} + +// listSize return size of ListMeta and items to be later used for preallocations. +// listMetaSize and itemSizes do not include header bytes (field identifier, size). +func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) { + // ListMeta + listMetaSize = listMeta.Size() + totalSize += 1 + sovGenerated(uint64(listMetaSize)) + listMetaSize + // Items + itemSizes = make([]int, len(items)) + for i, item := range items { + sizer, ok := item.(proto.Sizer) + if !ok { + return totalSize, listMetaSize, nil, errItemsSizer + } + n := sizer.Size() + itemSizes[i] = n + totalSize += 1 + sovGenerated(uint64(n)) + n + } + return totalSize, listMetaSize, itemSizes, nil +} + +func streamingEncodeUnknownList(w io.Writer, unk runtime.Unknown, listData streamingListData, memAlloc runtime.MemoryAllocator) error { + _, err := w.Write(protoEncodingPrefix) + if err != nil { + return err + } + // encodeList is responsible for encoding the List into the unknown Raw. 
+ encodeList := func(writer io.Writer) (int, error) { + return streamingEncodeList(writer, listData, memAlloc) + } + _, err = unk.MarshalToWriter(w, listData.totalSize, encodeList) + return err +} + +func streamingEncodeList(w io.Writer, listData streamingListData, memAlloc runtime.MemoryAllocator) (size int, err error) { + // ListMeta; 0xa = (1 << 3) | 2; field number: 1, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure + n, err := doEncodeWithHeader(&listData.listMeta, w, 0xa, listData.listMetaSize, memAlloc) + size += n + if err != nil { + return size, err + } + // Items; 0x12 = (2 << 3) | 2; field number: 2, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure + for i, item := range listData.items { + n, err := doEncodeWithHeader(item, w, 0x12, listData.itemsSizes[i], memAlloc) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +func writeVarintGenerated(w io.Writer, v int) (int, error) { + buf := make([]byte, sovGenerated(uint64(v))) + encodeVarintGenerated(buf, len(buf), uint64(v)) + return w.Write(buf) +} + +// sovGenerated is copied from `generated.pb.go` returns size of varint. +func sovGenerated(v uint64) int { + return (bits.Len64(v|1) + 6) / 7 +} + +// encodeVarintGenerated is copied from `generated.pb.go` encodes varint. +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go index 72d0ac79..381748d6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go @@ -15,4 +15,4 @@ limitations under the License. 
*/ // Package protobuf provides a Kubernetes serializer for the protobuf format. -package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" +package protobuf diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index c63e6dc6..c66c49ac 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -72,10 +72,18 @@ func IsNotMarshalable(err error) bool { // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return NewSerializerWithOptions(creater, typer, SerializerOptions{}) +} + +// NewSerializerWithOptions creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer +// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written +// as-is (any type info passed with the object will be used). +func NewSerializerWithOptions(creater runtime.ObjectCreater, typer runtime.ObjectTyper, opts SerializerOptions) *Serializer { return &Serializer{ prefix: protoEncodingPrefix, creater: creater, typer: typer, + options: opts, } } @@ -84,6 +92,14 @@ type Serializer struct { prefix []byte creater runtime.ObjectCreater typer runtime.ObjectTyper + + options SerializerOptions +} + +// SerializerOptions holds the options which are used to configure a Proto serializer. +type SerializerOptions struct { + // StreamingCollectionsEncoding enables encoding collection, one item at the time, drastically reducing memory needed. 
+ StreamingCollectionsEncoding bool } var _ runtime.Serializer = &Serializer{} @@ -209,6 +225,13 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime. }, } } + if s.options.StreamingCollectionsEncoding { + listData, err := getStreamingListData(obj) + if err == nil { + // Doesn't honor custom proto marshaling methods (like json streaming), because all proto objects implement proto methods. + return streamingEncodeUnknownList(w, unk, listData, memAlloc) + } + } switch t := obj.(type) { case bufferedMarshaller: @@ -428,6 +451,39 @@ func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime } func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + _, err := doEncode(obj, w, nil, memAlloc) + return err +} + +func doEncodeWithHeader(obj any, w io.Writer, field byte, precomputedSize int, memAlloc runtime.MemoryAllocator) (size int, err error) { + // Field identifier + n, err := w.Write([]byte{field}) + size += n + if err != nil { + return size, err + } + // Size + n, err = writeVarintGenerated(w, precomputedSize) + size += n + if err != nil { + return size, err + } + // Obj + n, err = doEncode(obj, w, &precomputedSize, memAlloc) + size += n + if err != nil { + return size, err + } + if n != precomputedSize { + return size, fmt.Errorf("the size value was %d, but doEncode wrote %d bytes to data", precomputedSize, n) + } + return size, nil +} + +// doEncode encodes provided object into writer using a allocator if possible. +// Avoids call by object Size if precomputedObjSize is provided. +// precomputedObjSize should not include header bytes (field identifier, size). 
+func doEncode(obj any, w io.Writer, precomputedObjSize *int, memAlloc runtime.MemoryAllocator) (int, error) { if memAlloc == nil { klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator") memAlloc = &runtime.SimpleAllocator{} @@ -436,40 +492,43 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runti case bufferedReverseMarshaller: // this path performs a single allocation during write only when the Allocator wasn't provided // it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods - encodedSize := uint64(t.Size()) - data := memAlloc.Allocate(encodedSize) + if precomputedObjSize == nil { + s := t.Size() + precomputedObjSize = &s + } + data := memAlloc.Allocate(uint64(*precomputedObjSize)) n, err := t.MarshalToSizedBuffer(data) if err != nil { - return err + return 0, err } - _, err = w.Write(data[:n]) - return err + return w.Write(data[:n]) case bufferedMarshaller: // this path performs a single allocation during write only when the Allocator wasn't provided // it also requires the caller to implement the more efficient Size and MarshalTo methods - encodedSize := uint64(t.Size()) - data := memAlloc.Allocate(encodedSize) + if precomputedObjSize == nil { + s := t.Size() + precomputedObjSize = &s + } + data := memAlloc.Allocate(uint64(*precomputedObjSize)) n, err := t.MarshalTo(data) if err != nil { - return err + return 0, err } - _, err = w.Write(data[:n]) - return err + return w.Write(data[:n]) case proto.Marshaler: // this path performs extra allocations data, err := t.Marshal() if err != nil { - return err + return 0, err } - _, err = w.Write(data) - return err + return w.Write(data) default: - return errNotMarshalable{reflect.TypeOf(obj)} + return 0, errNotMarshalable{reflect.TypeOf(obj)} } } diff --git 
a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index a82227b2..27a2064c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "io" ) type ProtobufMarshaller interface { @@ -28,6 +29,124 @@ type ProtobufReverseMarshaller interface { MarshalToSizedBuffer(data []byte) (int, error) } +const ( + typeMetaTag = 0xa + rawTag = 0x12 + contentEncodingTag = 0x1a + contentTypeTag = 0x22 + + // max length of a varint for a uint64 + maxUint64VarIntLength = 10 +) + +// MarshalToWriter allows a caller to provide a streaming writer for raw bytes, +// instead of populating them inside the Unknown struct. +// rawSize is the number of bytes rawWriter will write in a success case. +// writeRaw is called when it is time to write the raw bytes. It must return `rawSize, nil` or an error. +func (m *Unknown) MarshalToWriter(w io.Writer, rawSize int, writeRaw func(io.Writer) (int, error)) (int, error) { + size := 0 + + // reuse the buffer for varint marshaling + varintBuffer := make([]byte, maxUint64VarIntLength) + writeVarint := func(i int) (int, error) { + offset := encodeVarintGenerated(varintBuffer, len(varintBuffer), uint64(i)) + return w.Write(varintBuffer[offset:]) + } + + // TypeMeta + { + n, err := w.Write([]byte{typeMetaTag}) + size += n + if err != nil { + return size, err + } + + typeMetaBytes, err := m.TypeMeta.Marshal() + if err != nil { + return size, err + } + + n, err = writeVarint(len(typeMetaBytes)) + size += n + if err != nil { + return size, err + } + + n, err = w.Write(typeMetaBytes) + size += n + if err != nil { + return size, err + } + } + + // Raw, delegating write to writeRaw() + { + n, err := w.Write([]byte{rawTag}) + size += n + if err != nil { + return size, err + } + + n, err = writeVarint(rawSize) + size += n + if err != nil { + return size, err + } + + n, err = writeRaw(w) + 
size += n + if err != nil { + return size, err + } + if n != int(rawSize) { + return size, fmt.Errorf("the size value was %d, but encoding wrote %d bytes to data", rawSize, n) + } + } + + // ContentEncoding + { + n, err := w.Write([]byte{contentEncodingTag}) + size += n + if err != nil { + return size, err + } + + n, err = writeVarint(len(m.ContentEncoding)) + size += n + if err != nil { + return size, err + } + + n, err = w.Write([]byte(m.ContentEncoding)) + size += n + if err != nil { + return size, err + } + } + + // ContentEncoding + { + n, err := w.Write([]byte{contentTypeTag}) + size += n + if err != nil { + return size, err + } + + n, err = writeVarint(len(m.ContentType)) + size += n + if err != nil { + return size, err + } + + n, err = w.Write([]byte(m.ContentType)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown // that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. 
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { @@ -43,12 +162,12 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64 copy(data[i:], m.ContentType) i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) i-- - data[i] = 0x22 + data[i] = contentTypeTag i -= len(m.ContentEncoding) copy(data[i:], m.ContentEncoding) i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) i-- - data[i] = 0x1a + data[i] = contentEncodingTag if b != nil { if r, ok := b.(ProtobufReverseMarshaller); ok { n1, err := r.MarshalToSizedBuffer(data[:i]) @@ -75,7 +194,7 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64 } i = encodeVarintGenerated(data, i, size) i-- - data[i] = 0x12 + data[i] = rawTag } n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) if err != nil { @@ -84,6 +203,6 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64 i -= n2 i = encodeVarintGenerated(data, i, uint64(n2)) i-- - data[i] = 0xa + data[i] = typeMetaTag return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go index 5667fa99..783cbcdc 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package types implements various generic types used throughout kubernetes. -package types // import "k8s.io/apimachinery/pkg/types" +package types diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go index 5d4d6250..b3b39bc3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package errors implements various utility functions and types around errors. 
-package errors // import "k8s.io/apimachinery/pkg/util/errors" +package errors diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 1ab8fd39..f18845a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -91,12 +91,12 @@ func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { } n, err := io.ReadAtLeast(r.r, data[:max], int(max)) r.remaining -= n - if err == io.ErrShortBuffer || r.remaining > 0 { - return n, io.ErrShortBuffer - } if err != nil { return n, err } + if r.remaining > 0 { + return n, io.ErrShortBuffer + } if n != expect { return n, io.ErrUnexpectedEOF } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go index a502b5ad..2d6f6a0c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -20,24 +20,24 @@ limitations under the License. 
package intstr import ( - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" ) -// Fuzz satisfies fuzz.Interface -func (intstr *IntOrString) Fuzz(c fuzz.Continue) { +// RandFill satisfies randfill.NativeSelfFiller +func (intstr *IntOrString) RandFill(c randfill.Continue) { if intstr == nil { return } - if c.RandBool() { + if c.Bool() { intstr.Type = Int - c.Fuzz(&intstr.IntVal) + c.Fill(&intstr.IntVal) intstr.StrVal = "" } else { intstr.Type = String intstr.IntVal = 0 - c.Fuzz(&intstr.StrVal) + c.Fill(&intstr.StrVal) } } // ensure IntOrString implements fuzz.Interface -var _ fuzz.Interface = &IntOrString{} +var _ randfill.NativeSelfFiller = &IntOrString{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index df374949..de97deae 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -36,6 +36,11 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. +// +// The code invoking these handlers prepares a contextual logger so that +// klog.FromContext(ctx) already skips over the panic handler itself and +// several other intermediate functions, ideally such that the log output +// is attributed to the code which triggered the panic. var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via @@ -45,7 +50,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic} // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. // -// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. +// Contextual logging: HandleCrashWithContext or HandleCrashWithLogger should be used instead of HandleCrash in code which supports contextual logging. 
func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) @@ -74,10 +79,30 @@ func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(cont } } -// handleCrash is the common implementation of HandleCrash and HandleCrash. +// HandleCrashWithLogger simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +func HandleCrashWithLogger(logger klog.Logger, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + ctx := klog.NewContext(context.Background(), logger) + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of the HandleCrash* variants. // Having those call a common implementation ensures that the stack depth // is the same regardless through which path the handlers get invoked. func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. 
+ logger := klog.FromContext(ctx).WithCallDepth(4) + ctx = klog.NewContext(ctx, logger) + for _, fn := range PanicHandlers { fn(ctx, r) } @@ -106,11 +131,7 @@ func logPanic(ctx context.Context, r interface{}) { stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] - // We don't really know how many call frames to skip because the Go - // panic handler is between us and the code where the panic occurred. - // If it's one function (as in Go 1.21), then skipping four levels - // gets us to the function which called the `defer HandleCrashWithontext(...)`. - logger := klog.FromContext(ctx).WithCallDepth(4) + logger := klog.FromContext(ctx) // For backwards compatibility, conversion to string // is handled here instead of defering to the logging @@ -176,12 +197,19 @@ func HandleError(err error) { // and key/value pairs. // // This variant should be used instead of HandleError because it supports -// structured, contextual logging. +// structured, contextual logging. Alternatively, [HandleErrorWithLogger] can +// be used if a logger is available instead of a context. func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { handleError(ctx, err, msg, keysAndValues...) } -// handleError is the common implementation of HandleError and HandleErrorWithContext. +// HandleErrorWithLogger is an alternative to [HandlerErrorWithContext] which accepts +// a logger for contextual logging. +func HandleErrorWithLogger(logger klog.Logger, err error, msg string, keysAndValues ...interface{}) { + handleError(klog.NewContext(context.Background(), logger), err, msg, keysAndValues...) +} + +// handleError is the common implementation of the HandleError* variants. // Using this common implementation ensures that the stack depth // is the same regardless through which path the handlers get invoked. 
func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index fd281bdb..19488339 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package sets has generic set and specified sets. Generic set will // replace specified ones over time. And specific ones are deprecated. -package sets // import "k8s.io/apimachinery/pkg/util/sets" +package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go new file mode 100644 index 00000000..1d15deae --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go @@ -0,0 +1,212 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// ErrorMatcher is a helper for comparing Error objects. +type ErrorMatcher struct { + // TODO(thockin): consider whether type is ever NOT required, maybe just + // assume it. + matchType bool + // TODO(thockin): consider whether field could be assumed - if the + // "want" error has a nil field, don't match on field. 
+ matchField bool + // TODO(thockin): consider whether value could be assumed - if the + // "want" error has a nil value, don't match on field. + matchValue bool + matchOrigin bool + matchDetail func(want, got string) bool + requireOriginWhenInvalid bool +} + +// Matches returns true if the two Error objects match according to the +// configured criteria. +func (m ErrorMatcher) Matches(want, got *Error) bool { + if m.matchType && want.Type != got.Type { + return false + } + if m.matchField && want.Field != got.Field { + return false + } + if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) { + return false + } + if m.matchOrigin { + if want.Origin != got.Origin { + return false + } + if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid { + if want.Origin == "" || got.Origin == "" { + return false + } + } + } + if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) { + return false + } + return true +} + +// Render returns a string representation of the specified Error object, +// according to the criteria configured in the ErrorMatcher. +func (m ErrorMatcher) Render(e *Error) string { + buf := strings.Builder{} + + comma := func() { + if buf.Len() > 0 { + buf.WriteString(", ") + } + } + + if m.matchType { + comma() + buf.WriteString(fmt.Sprintf("Type=%q", e.Type)) + } + if m.matchField { + comma() + buf.WriteString(fmt.Sprintf("Field=%q", e.Field)) + } + if m.matchValue { + comma() + buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue)) + } + if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid { + comma() + buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin)) + } + if m.matchDetail != nil { + comma() + buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail)) + } + return "{" + buf.String() + "}" +} + +// Exactly returns a derived ErrorMatcher which matches all fields exactly. 
+func (m ErrorMatcher) Exactly() ErrorMatcher { + return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact() +} + +// ByType returns a derived ErrorMatcher which also matches by type. +func (m ErrorMatcher) ByType() ErrorMatcher { + m.matchType = true + return m +} + +// ByField returns a derived ErrorMatcher which also matches by field path. +func (m ErrorMatcher) ByField() ErrorMatcher { + m.matchField = true + return m +} + +// ByValue returns a derived ErrorMatcher which also matches by the errant +// value. +func (m ErrorMatcher) ByValue() ErrorMatcher { + m.matchValue = true + return m +} + +// ByOrigin returns a derived ErrorMatcher which also matches by the origin. +func (m ErrorMatcher) ByOrigin() ErrorMatcher { + m.matchOrigin = true + return m +} + +// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires +// the Origin field to be set when the Type is Invalid and the matcher is +// matching by Origin. +func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher { + m.requireOriginWhenInvalid = true + return m +} + +// ByDetailExact returns a derived ErrorMatcher which also matches errors by +// the exact detail string. +func (m ErrorMatcher) ByDetailExact() ErrorMatcher { + m.matchDetail = func(want, got string) bool { + return got == want + } + return m +} + +// ByDetailSubstring returns a derived ErrorMatcher which also matches errors +// by a substring of the detail string. +func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher { + m.matchDetail = func(want, got string) bool { + return strings.Contains(got, want) + } + return m +} + +// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a +// regular expression of the detail string, where the "want" string is assumed +// to be a valid regular expression. 
+func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher { + m.matchDetail = func(want, got string) bool { + return regexp.MustCompile(want).MatchString(got) + } + return m +} + +// TestIntf lets users pass a testing.T while not coupling this package to Go's +// testing package. +type TestIntf interface { + Helper() + Errorf(format string, args ...any) + Logf(format string, args ...any) +} + +// Test compares two ErrorLists by the criteria configured in this matcher, and +// fails the test if they don't match. If a given "want" error matches multiple +// "got" errors, they will all be consumed. This might be OK (e.g. if there are +// multiple errors on the same field from the same origin) or it might be an +// insufficiently specific matcher, so these will be logged. +func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) { + tb.Helper() + + remaining := got + for _, w := range want { + tmp := make(ErrorList, 0, len(remaining)) + n := 0 + for _, g := range remaining { + if m.Matches(w, g) { + n++ + } else { + tmp = append(tmp, g) + } + } + if n == 0 { + tb.Errorf("expected an error matching:\n%s", m.Render(w)) + } else if n > 1 { + // This is not necessarily and error, but it's worth logging in + // case it's not what the test author intended. + tb.Logf("multiple errors matched:\n%s", m.Render(w)) + } + remaining = tmp + } + if len(remaining) > 0 { + for _, e := range remaining { + exactly := m.Exactly() // makes a copy + tb.Errorf("unmatched error:\n%s", exactly.Render(e)) + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index f1634bc0..840d645e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -33,13 +33,35 @@ type Error struct { Field string BadValue interface{} Detail string + + // Origin uniquely identifies where this error was generated from. 
It is used in testing to + // compare expected errors against actual errors without relying on exact detail string matching. + // This allows tests to verify the correct validation logic triggered the error + // regardless of how the error message might be formatted or localized. + // + // The value should be either: + // - A simple camelCase identifier (e.g., "maximum", "maxItems") + // - A structured format using "format=" for validation errors related to specific formats + // (e.g., "format=dns-label", "format=qualified-name") + // + // If the Origin corresponds to an existing declarative validation tag or JSON Schema keyword, + // use that same name for consistency. + // + // Origin should be set in the most deeply nested validation function that + // can still identify the unique source of the error. + Origin string + + // CoveredByDeclarative is true when this error is covered by declarative + // validation. This field is to identify errors from imperative validation + // that should also be caught by declarative validation. + CoveredByDeclarative bool } var _ error = &Error{} // Error implements the error interface. -func (v *Error) Error() string { - return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Field, e.ErrorBody()) } type OmitValueType struct{} @@ -48,21 +70,21 @@ var omitValue = OmitValueType{} // ErrorBody returns the error message without the field name. This is useful // for building nice-looking higher-level error reporting. 
-func (v *Error) ErrorBody() string { +func (e *Error) ErrorBody() string { var s string switch { - case v.Type == ErrorTypeRequired: - s = v.Type.String() - case v.Type == ErrorTypeForbidden: - s = v.Type.String() - case v.Type == ErrorTypeTooLong: - s = v.Type.String() - case v.Type == ErrorTypeInternal: - s = v.Type.String() - case v.BadValue == omitValue: - s = v.Type.String() + case e.Type == ErrorTypeRequired: + s = e.Type.String() + case e.Type == ErrorTypeForbidden: + s = e.Type.String() + case e.Type == ErrorTypeTooLong: + s = e.Type.String() + case e.Type == ErrorTypeInternal: + s = e.Type.String() + case e.BadValue == omitValue: + s = e.Type.String() default: - value := v.BadValue + value := e.BadValue valueType := reflect.TypeOf(value) if value == nil || valueType == nil { value = "null" @@ -76,26 +98,38 @@ func (v *Error) ErrorBody() string { switch t := value.(type) { case int64, int32, float64, float32, bool: // use simple printer for simple types - s = fmt.Sprintf("%s: %v", v.Type, value) + s = fmt.Sprintf("%s: %v", e.Type, value) case string: - s = fmt.Sprintf("%s: %q", v.Type, t) + s = fmt.Sprintf("%s: %q", e.Type, t) case fmt.Stringer: // anything that defines String() is better than raw struct - s = fmt.Sprintf("%s: %s", v.Type, t.String()) + s = fmt.Sprintf("%s: %s", e.Type, t.String()) default: // fallback to raw struct // TODO: internal types have panic guards against json.Marshalling to prevent // accidental use of internal types in external serialized form. 
For now, use // %#v, although it would be better to show a more expressive output in the future - s = fmt.Sprintf("%s: %#v", v.Type, value) + s = fmt.Sprintf("%s: %#v", e.Type, value) } } - if len(v.Detail) != 0 { - s += fmt.Sprintf(": %s", v.Detail) + if len(e.Detail) != 0 { + s += fmt.Sprintf(": %s", e.Detail) } return s } +// WithOrigin adds origin information to the FieldError +func (e *Error) WithOrigin(o string) *Error { + e.Origin = o + return e +} + +// MarkCoveredByDeclarative marks the error as covered by declarative validation. +func (e *Error) MarkCoveredByDeclarative() *Error { + e.CoveredByDeclarative = true + return e +} + // ErrorType is a machine readable value providing more detail about why // a field is invalid. These values are expected to match 1-1 with // CauseType in api/types.go. @@ -169,32 +203,32 @@ func (t ErrorType) String() string { // TypeInvalid returns a *Error indicating "type is invalid" func TypeInvalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeTypeInvalid, field.String(), value, detail} + return &Error{ErrorTypeTypeInvalid, field.String(), value, detail, "", false} } // NotFound returns a *Error indicating "value not found". This is // used to report failure to find a requested value (e.g. looking up an ID). func NotFound(field *Path, value interface{}) *Error { - return &Error{ErrorTypeNotFound, field.String(), value, ""} + return &Error{ErrorTypeNotFound, field.String(), value, "", "", false} } // Required returns a *Error indicating "value required". This is used // to report required values that are not provided (e.g. empty strings, null // values, or empty arrays). func Required(field *Path, detail string) *Error { - return &Error{ErrorTypeRequired, field.String(), "", detail} + return &Error{ErrorTypeRequired, field.String(), "", detail, "", false} } // Duplicate returns a *Error indicating "duplicate value". This is // used to report collisions of values that must be unique (e.g. 
names or IDs). func Duplicate(field *Path, value interface{}) *Error { - return &Error{ErrorTypeDuplicate, field.String(), value, ""} + return &Error{ErrorTypeDuplicate, field.String(), value, "", "", false} } // Invalid returns a *Error indicating "invalid value". This is used // to report malformed values (e.g. failed regex match, too long, out of bounds). func Invalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeInvalid, field.String(), value, detail} + return &Error{ErrorTypeInvalid, field.String(), value, detail, "", false} } // NotSupported returns a *Error indicating "unsupported value". @@ -209,7 +243,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E } detail = "supported values: " + strings.Join(quotedValues, ", ") } - return &Error{ErrorTypeNotSupported, field.String(), value, detail} + return &Error{ErrorTypeNotSupported, field.String(), value, detail, "", false} } // Forbidden returns a *Error indicating "forbidden". This is used to @@ -217,7 +251,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E // some conditions, but which are not permitted by current conditions (e.g. // security policy). func Forbidden(field *Path, detail string) *Error { - return &Error{ErrorTypeForbidden, field.String(), "", detail} + return &Error{ErrorTypeForbidden, field.String(), "", detail, "", false} } // TooLong returns a *Error indicating "too long". This is used to report that @@ -231,7 +265,7 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error { } else { msg = "value is too long" } - return &Error{ErrorTypeTooLong, field.String(), "", msg} + return &Error{ErrorTypeTooLong, field.String(), "", msg, "", false} } // TooLongMaxLength returns a *Error indicating "too long". 
@@ -259,14 +293,14 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { actual = omitValue } - return &Error{ErrorTypeTooMany, field.String(), actual, msg} + return &Error{ErrorTypeTooMany, field.String(), actual, msg, "", false} } // InternalError returns a *Error indicating "internal error". This is used // to signal that an error was found that was not directly related to user // input. The err argument must be non-nil. func InternalError(field *Path, err error) *Error { - return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} + return &Error{ErrorTypeInternal, field.String(), nil, err.Error(), "", false} } // ErrorList holds a set of Errors. It is plausible that we might one day have @@ -285,6 +319,22 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { } } +// WithOrigin sets the origin for all errors in the list and returns the updated list. +func (list ErrorList) WithOrigin(origin string) ErrorList { + for _, err := range list { + err.Origin = origin + } + return list +} + +// MarkCoveredByDeclarative marks all errors in the list as covered by declarative validation. +func (list ErrorList) MarkCoveredByDeclarative() ErrorList { + for _, err := range list { + err.CoveredByDeclarative = true + } + return list +} + // ToAggregate converts the ErrorList into an errors.Aggregate. func (list ErrorList) ToAggregate() utilerrors.Aggregate { if len(list) == 0 { @@ -321,3 +371,25 @@ func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { // FilterOut takes an Aggregate and returns an Aggregate return fromAggregate(err.(utilerrors.Aggregate)) } + +// ExtractCoveredByDeclarative returns a new ErrorList containing only the errors that should be covered by declarative validation. 
+func (list ErrorList) ExtractCoveredByDeclarative() ErrorList { + newList := ErrorList{} + for _, err := range list { + if err.CoveredByDeclarative { + newList = append(newList, err) + } + } + return newList +} + +// RemoveCoveredByDeclarative returns a new ErrorList containing only the errors that should not be covered by declarative validation. +func (list ErrorList) RemoveCoveredByDeclarative() ErrorList { + newList := ErrorList{} + for _, err := range list { + if !err.CoveredByDeclarative { + newList = append(newList, err) + } + } + return newList +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go new file mode 100644 index 00000000..6e947c74 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go @@ -0,0 +1,278 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "net" + "net/netip" + "slices" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" + netutils "k8s.io/utils/net" +) + +func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) { + var allErrors field.ErrorList + + ip := netutils.ParseIPSloppy(value) + if ip == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 
10.9.8.7 or 2001:db8::ffff)")) + return nil, allErrors + } + + if strictValidation { + addr, err := netip.ParseAddr(value) + if err != nil { + // If netutils.ParseIPSloppy parsed it, but netip.ParseAddr + // doesn't, then it must have illegal leading 0s. + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s")) + } + if addr.Is4In6() { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address")) + } + } + + return ip, allErrors +} + +// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy" +// API field that predates strict IP validation. In particular, this allows IPs that are +// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc"). +// +// If strictValidation is false, this also allows IPs in certain invalid or ambiguous +// formats: +// +// 1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004"). +// Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading +// "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets +// as octal, meaning different software might interpret the same string as two +// different IPs, potentially leading to security issues. (Current net.ParseIP and +// netip.ParseAddr simply reject inputs with leading "0"s.) +// +// 2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to +// different software interpreting the value in different ways, because they may be +// treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and +// netip.ParseAddr both allow these, but there are no use cases for representing IPv4 +// addresses as IPv4-mapped IPv6 addresses in Kubernetes.) +// +// Alternatively, when validating an update to an existing field, you can pass a list of +// IP values from the old object that should be accepted if they appear in the new object +// even if they are not valid. 
+// +// This function should only be used to validate the existing fields that were +// historically validated in this way, and strictValidation should be true unless the +// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields. +func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList { + if slices.Contains(validOldIPs, value) { + return nil + } + _, allErrors := parseIP(fldPath, value, strictValidation) + return allErrors.WithOrigin("format=ip-sloppy") +} + +// IsValidIP tests that the argument is a valid IP address, according to current +// Kubernetes standards for IP address validation. +func IsValidIP(fldPath *field.Path, value string) field.ErrorList { + ip, allErrors := parseIP(fldPath, value, true) + if len(allErrors) != 0 { + return allErrors.WithOrigin("format=ip-strict") + } + + if value != ip.String() { + allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String()))) + } + return allErrors.WithOrigin("format=ip-strict") +} + +// GetWarningsForIP returns warnings for IP address values in non-standard forms. This +// should only be used with fields that are validated with IsValidIPForLegacyField(). +func GetWarningsForIP(fldPath *field.Path, value string) []string { + ip := netutils.ParseIPSloppy(value) + if ip == nil { + klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value) + return nil + } + + addr, _ := netip.ParseAddr(value) + if !addr.IsValid() || addr.Is4In6() { + // This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but + // ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way, + // re-stringifying the net.IP value will give the preferred form. 
+ return []string{ + fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()), + } + } + + // If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though + // it may be non-canonical. + if addr.Is6() && addr.String() != value { + return []string{ + fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()), + } + } + + return nil +} + +func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) { + var allErrors field.ErrorList + + _, ipnet, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")) + return nil, allErrors + } + + if strictValidation { + prefix, err := netip.ParsePrefix(value) + if err != nil { + // If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix + // doesn't, then it must have illegal leading 0s (either in the + // IP part or the prefix). + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length")) + } else if prefix.Addr().Is4In6() { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address")) + } else if prefix.Addr() != prefix.Masked().Addr() { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length")) + } + } + + return ipnet, allErrors +} + +// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy" +// API field that predates strict IP validation. In particular, this allows IPs that are +// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64"). +// +// If strictValidation is false, this also allows CIDR values in certain invalid or +// ambiguous formats: +// +// 1. 
The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with +// strictValidation=false. +// +// 2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after +// the prefix length all being 0), or an "interface address" as with `ip addr` (with a +// complete IP address and associated subnet length). With strict validation, the +// value is required to be in "subnet"/"mask" form. +// +// 3. The prefix length is allowed to have leading 0s. +// +// Alternatively, when validating an update to an existing field, you can pass a list of +// CIDR values from the old object that should be accepted if they appear in the new +// object even if they are not valid. +// +// This function should only be used to validate the existing fields that were +// historically validated in this way, and strictValidation should be true unless the +// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or +// IsValidInterfaceAddress for parsing new fields. +func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList { + if slices.Contains(validOldCIDRs, value) { + return nil + } + + _, allErrors := parseCIDR(fldPath, value, strictValidation) + return allErrors +} + +// IsValidCIDR tests that the argument is a valid CIDR value, according to current +// Kubernetes standards for CIDR validation. This function is only for +// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the +// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values. 
+func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { + ipnet, allErrors := parseCIDR(fldPath, value, true) + if len(allErrors) != 0 { + return allErrors + } + + if value != ipnet.String() { + allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String()))) + } + return allErrors +} + +// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should +// only be used with fields that are validated with IsValidCIDRForLegacyField(). +func GetWarningsForCIDR(fldPath *field.Path, value string) []string { + ip, ipnet, err := netutils.ParseCIDRSloppy(value) + if err != nil { + klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value) + return nil + } + + var warnings []string + + // Check for bits set after prefix length + if !ip.Equal(ipnet.IP) { + _, addrlen := ipnet.Mask.Size() + singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen) + warnings = append(warnings, + fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR), + ) + } + + prefix, _ := netip.ParsePrefix(value) + addr := prefix.Addr() + if !prefix.IsValid() || addr.Is4In6() { + // This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but + // ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way, + // re-stringifying the net.IPNet value will give the preferred form. + warnings = append(warnings, + fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()), + ) + } + + // If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid, + // though it may be non-canonical. But only check this if there are no other + // warnings, since either of the other warnings would also cause a round-trip + // failure. 
+ if len(warnings) == 0 && addr.Is6() && prefix.String() != value { + warnings = append(warnings, + fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()), + ) + } + + return warnings +} + +// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in +// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated +// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g., +// "192.168.1.0/24"). +func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip, ipnet, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)")) + return allErrors + } + + // The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't + // include the bits after the prefix. We need to construct the canonical form + // ourselves from `ip` and `ipnet.Mask`. 
+ maskSize, _ := ipnet.Mask.Size() + if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 { + // "::ffff:192.168.0.1/120" -> "192.168.0.1/24" + maskSize -= (net.IPv6len - net.IPv4len) * 8 + } + canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize) + if value != canonical { + allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical))) + } + return allErrors +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 9bc393cf..b6be7af1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -24,7 +24,6 @@ import ( "unicode" "k8s.io/apimachinery/pkg/util/validation/field" - netutils "k8s.io/utils/net" ) const qnameCharFmt string = "[A-Za-z0-9]" @@ -369,45 +368,6 @@ func IsValidPortName(port string) []string { return errs } -// IsValidIP tests that the argument is a valid IP address. -func IsValidIP(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - if netutils.ParseIPSloppy(value) == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)")) - } - return allErrors -} - -// IsValidIPv4Address tests that the argument is a valid IPv4 address. -func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := netutils.ParseIPSloppy(value) - if ip == nil || ip.To4() == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) - } - return allErrors -} - -// IsValidIPv6Address tests that the argument is a valid IPv6 address. 
-func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := netutils.ParseIPSloppy(value) - if ip == nil || ip.To4() != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) - } - return allErrors -} - -// IsValidCIDR tests that the argument is a valid CIDR value. -func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - _, _, err := netutils.ParseCIDRSloppy(value) - if err != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")) - } - return allErrors -} - const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go index 41876192..177be09a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -157,6 +157,8 @@ func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) Dela // Until is syntactic sugar on top of JitterUntil with zero jitter factor and // with sliding = true (which means the timer for period starts after the f // completes). +// +// Contextual logging: UntilWithContext should be used instead of Until in code which supports contextual logging. func Until(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, true, stopCh) } @@ -176,6 +178,8 @@ func UntilWithContext(ctx context.Context, f func(context.Context), period time. // NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter // factor, with sliding = false (meaning the timer for period starts at the same // time as the function starts). 
+// +// Contextual logging: NonSlidingUntilWithContext should be used instead of NonSlidingUntil in code which supports contextual logging. func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, false, stopCh) } @@ -200,19 +204,44 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe // // Close stopCh to stop. f may not be invoked if stop channel is already // closed. Pass NeverStop to if you don't want it stop. +// +// Contextual logging: JitterUntilWithContext should be used instead of JitterUntil in code which supports contextual logging. func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) } +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already done. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + BackoffUntilWithContext(ctx, f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding) +} + // BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. // // If sliding is true, the period is computed after f runs. If it is false then // period includes the runtime for f. +// +// Contextual logging: BackoffUntilWithContext should be used instead of BackoffUntil in code which supports contextual logging. 
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + BackoffUntilWithContext(ContextForChannel(stopCh), func(context.Context) { f() }, backoff, sliding) +} + +// BackoffUntilWithContext loops until context is done, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +func BackoffUntilWithContext(ctx context.Context, f func(ctx context.Context), backoff BackoffManager, sliding bool) { var t clock.Timer for { select { - case <-stopCh: + case <-ctx.Done(): return default: } @@ -222,8 +251,8 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan } func() { - defer runtime.HandleCrash() - f() + defer runtime.HandleCrashWithContext(ctx) + f(ctx) }() if sliding { @@ -236,7 +265,7 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan // In order to mitigate we re-check stopCh at the beginning // of every loop to prevent extra executions of f(). select { - case <-stopCh: + case <-ctx.Done(): if !t.Stop() { <-t.C() } @@ -246,19 +275,6 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan } } -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // backoffManager provides simple backoff behavior in a threadsafe manner to a caller. 
type backoffManager struct { backoff Backoff diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go index 3f0c968e..ff89dc17 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package wait provides tools for polling or listening for changes // to a condition. -package wait // import "k8s.io/apimachinery/pkg/util/wait" +package wait diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go index 107bfc13..9f9b929f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -49,7 +49,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding // if we haven't requested immediate execution, delay once if immediate { if ok, err := func() (bool, error) { - defer runtime.HandleCrash() + defer runtime.HandleCrashWithContext(ctx) return condition(ctx) }(); err != nil || ok { return err @@ -83,7 +83,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding t.Next() } if ok, err := func() (bool, error) { - defer runtime.HandleCrash() + defer runtime.HandleCrashWithContext(ctx) return condition(ctx) }(); err != nil || ok { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 6805e8cf..7379a8d5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -80,6 +80,10 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } +// jitterRand is a dedicated random source for jitter calculations. +// It defaults to rand.Float64, but is a package variable so it can be overridden to make unit tests deterministic. 
+var jitterRand = rand.Float64 + // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -89,7 +93,7 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { if maxFactor <= 0.0 { maxFactor = 1.0 } - wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + wait := duration + time.Duration(jitterRand()*maxFactor*float64(duration)) return wait } @@ -141,6 +145,7 @@ func (c channelContext) Value(key any) any { return nil } // // Deprecated: Will be removed when the legacy polling methods are removed. func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + //nolint:logcheck // Already deprecated. defer runtime.HandleCrash() return condition() } @@ -150,7 +155,7 @@ func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { // // Deprecated: Will be removed when the legacy polling methods are removed. func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() + defer runtime.HandleCrashWithContext(ctx) return condition(ctx) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 9837b3df..7342f8d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -20,10 +20,12 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io" "strings" "unicode" + "unicode/utf8" jsonutil "k8s.io/apimachinery/pkg/util/json" @@ -92,7 +94,7 @@ func UnmarshalStrict(data []byte, v interface{}) error { // YAML decoding path is not used (so that error messages are // JSON specific). func ToJSON(data []byte) ([]byte, error) { - if hasJSONPrefix(data) { + if IsJSONBuffer(data) { return data, nil } return yaml.YAMLToJSON(data) @@ -102,7 +104,8 @@ func ToJSON(data []byte) ([]byte, error) { // separating individual documents. 
It first converts the YAML // body to JSON, then unmarshals the JSON. type YAMLToJSONDecoder struct { - reader Reader + reader Reader + inputOffset int } // NewYAMLToJSONDecoder decodes YAML documents from the provided @@ -121,7 +124,7 @@ func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { // yaml.Unmarshal. func (d *YAMLToJSONDecoder) Decode(into interface{}) error { bytes, err := d.reader.Read() - if err != nil && err != io.EOF { + if err != nil && err != io.EOF { //nolint:errorlint return err } @@ -131,9 +134,14 @@ func (d *YAMLToJSONDecoder) Decode(into interface{}) error { return YAMLSyntaxError{err} } } + d.inputOffset += len(bytes) return err } +func (d *YAMLToJSONDecoder) InputOffset() int { + return d.inputOffset +} + // YAMLDecoder reads chunks of objects and returns ErrShortBuffer if // the data is not sufficient. type YAMLDecoder struct { @@ -229,18 +237,20 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err return 0, nil, nil } -// decoder is a convenience interface for Decode. -type decoder interface { - Decode(into interface{}) error -} - -// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or -// YAML documents by sniffing for a leading { character. +// YAMLOrJSONDecoder attempts to decode a stream of JSON or YAML documents. +// While JSON is YAML, the way Go's JSON decode defines a multi-document stream +// is a series of JSON objects (e.g. {}{}), but YAML defines a multi-document +// stream as a series of documents separated by "---". +// +// This decoder will attempt to decode the stream as JSON first, and if that +// fails, it will switch to YAML. Once it determines the stream is JSON (by +// finding a non-YAML-delimited series of objects), it will not switch to YAML. +// Once it switches to YAML it will not switch back to JSON. 
type YAMLOrJSONDecoder struct { - r io.Reader - bufferSize int - - decoder decoder + json *json.Decoder + yaml *YAMLToJSONDecoder + stream *StreamReader + count int // how many objects have been decoded } type JSONSyntaxError struct { @@ -265,31 +275,108 @@ func (e YAMLSyntaxError) Error() string { // how far into the stream the decoder will look to figure out whether this // is a JSON stream (has whitespace followed by an open brace). func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { - return &YAMLOrJSONDecoder{ - r: r, - bufferSize: bufferSize, + d := &YAMLOrJSONDecoder{} + + reader, _, mightBeJSON := GuessJSONStream(r, bufferSize) + d.stream = reader + if mightBeJSON { + d.json = json.NewDecoder(reader) + } else { + d.yaml = NewYAMLToJSONDecoder(reader) } + return d } // Decode unmarshals the next object from the underlying stream into the // provide object, or returns an error. func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { - if d.decoder == nil { - buffer, _, isJSON := GuessJSONStream(d.r, d.bufferSize) - if isJSON { - d.decoder = json.NewDecoder(buffer) + // Because we don't know if this is a JSON or YAML stream, a failure from + // both decoders is ambiguous. When in doubt, it will return the error from + // the JSON decoder. Unfortunately, this means that if the first document + // is invalid YAML, the error won't be awesome. + // TODO: the errors from YAML are not great, we could improve them a lot. 
+ var firstErr error + if d.json != nil { + err := d.json.Decode(into) + if err == nil { + d.stream.Consume(int(d.json.InputOffset()) - d.stream.Consumed()) + d.count++ + return nil + } + if err == io.EOF { //nolint:errorlint + return err + } + var syntax *json.SyntaxError + if ok := errors.As(err, &syntax); ok { + firstErr = JSONSyntaxError{ + Offset: syntax.Offset, + Err: syntax, + } } else { - d.decoder = NewYAMLToJSONDecoder(buffer) + firstErr = err + } + if d.count > 1 { + // If we found 0 or 1 JSON object(s), this stream is still + // ambiguous. But if we found more than 1 JSON object, then this + // is an unambiguous JSON stream, and we should not switch to YAML. + return err + } + // If JSON decoding hits the end of one object and then fails on the + // next, it leaves any leading whitespace in the buffer, which can + // confuse the YAML decoder. We just eat any whitespace we find, up to + // and including the first newline. + d.stream.Rewind() + if err := d.consumeWhitespace(); err == nil { + d.yaml = NewYAMLToJSONDecoder(d.stream) + } + d.json = nil + } + if d.yaml != nil { + err := d.yaml.Decode(into) + if err == nil { + d.stream.Consume(d.yaml.InputOffset() - d.stream.Consumed()) + d.count++ + return nil + } + if err == io.EOF { //nolint:errorlint + return err + } + if firstErr == nil { + firstErr = err } } - err := d.decoder.Decode(into) - if syntax, ok := err.(*json.SyntaxError); ok { - return JSONSyntaxError{ - Offset: syntax.Offset, - Err: syntax, + if firstErr != nil { + return firstErr + } + return fmt.Errorf("decoding failed as both JSON and YAML") +} + +func (d *YAMLOrJSONDecoder) consumeWhitespace() error { + consumed := 0 + for { + buf, err := d.stream.ReadN(4) + if err != nil && err == io.EOF { //nolint:errorlint + return err + } + r, sz := utf8.DecodeRune(buf) + if r == utf8.RuneError || sz == 0 { + return fmt.Errorf("invalid utf8 rune") + } + d.stream.RewindN(len(buf) - sz) + if !unicode.IsSpace(r) { + d.stream.RewindN(sz) + 
d.stream.Consume(consumed) + return nil + } + if r == '\n' { + d.stream.Consume(consumed) + return nil + } + if err == io.EOF { //nolint:errorlint + break } } - return err + return io.EOF } type Reader interface { @@ -311,7 +398,7 @@ func (r *YAMLReader) Read() ([]byte, error) { var buffer bytes.Buffer for { line, err := r.reader.Read() - if err != nil && err != io.EOF { + if err != nil && err != io.EOF { //nolint:errorlint return nil, err } @@ -329,11 +416,11 @@ func (r *YAMLReader) Read() ([]byte, error) { if buffer.Len() != 0 { return buffer.Bytes(), nil } - if err == io.EOF { + if err == io.EOF { //nolint:errorlint return nil, err } } - if err == io.EOF { + if err == io.EOF { //nolint:errorlint if buffer.Len() != 0 { // If we're at EOF, we have a final, non-terminated line. Return it. return buffer.Bytes(), nil @@ -369,26 +456,20 @@ func (r *LineReader) Read() ([]byte, error) { // GuessJSONStream scans the provided reader up to size, looking // for an open brace indicating this is JSON. It will return the // bufio.Reader it creates for the consumer. -func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { - buffer := bufio.NewReaderSize(r, size) +func GuessJSONStream(r io.Reader, size int) (*StreamReader, []byte, bool) { + buffer := NewStreamReader(r, size) b, _ := buffer.Peek(size) - return buffer, b, hasJSONPrefix(b) + return buffer, b, IsJSONBuffer(b) } // IsJSONBuffer scans the provided buffer, looking // for an open brace indicating this is JSON. func IsJSONBuffer(buf []byte) bool { - return hasJSONPrefix(buf) + return hasPrefix(buf, jsonPrefix) } var jsonPrefix = []byte("{") -// hasJSONPrefix returns true if the provided buffer appears to start with -// a JSON open brace. -func hasJSONPrefix(buf []byte) bool { - return hasPrefix(buf, jsonPrefix) -} - // Return true if the first non-whitespace bytes in buf is // prefix. 
func hasPrefix(buf []byte, prefix []byte) bool { diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go new file mode 100644 index 00000000..d0699105 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go @@ -0,0 +1,130 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import "io" + +// StreamReader is a reader designed for consuming streams of variable-length +// messages. It buffers data until it is explicitly consumed, and can be +// rewound to re-read previous data. +type StreamReader struct { + r io.Reader + buf []byte + head int // current read offset into buf + ttlConsumed int // number of bytes which have been consumed +} + +// NewStreamReader creates a new StreamReader wrapping the provided +// io.Reader. +func NewStreamReader(r io.Reader, size int) *StreamReader { + if size == 0 { + size = 4096 + } + return &StreamReader{ + r: r, + buf: make([]byte, 0, size), // Start with a reasonable capacity + } +} + +// Read implements io.Reader. It first returns any buffered data after the +// current offset, and if that's exhausted, reads from the underlying reader +// and buffers the data. The returned data is not considered consumed until the +// Consume method is called. 
+func (r *StreamReader) Read(p []byte) (n int, err error) { + // If we have buffered data, return it + if r.head < len(r.buf) { + n = copy(p, r.buf[r.head:]) + r.head += n + return n, nil + } + + // If we've already hit EOF, return it + if r.r == nil { + return 0, io.EOF + } + + // Read from the underlying reader + n, err = r.r.Read(p) + if n > 0 { + r.buf = append(r.buf, p[:n]...) + r.head += n + } + if err == nil { + return n, nil + } + if err == io.EOF { + // Store that we've hit EOF by setting r to nil + r.r = nil + } + return n, err +} + +// ReadN reads exactly n bytes from the reader, blocking until all bytes are +// read or an error occurs. If an error occurs, the number of bytes read is +// returned along with the error. If EOF is hit before n bytes are read, this +// will return the bytes read so far, along with io.EOF. The returned data is +// not considered consumed until the Consume method is called. +func (r *StreamReader) ReadN(want int) ([]byte, error) { + ret := make([]byte, want) + off := 0 + for off < want { + n, err := r.Read(ret[off:]) + if err != nil { + return ret[:off+n], err + } + off += n + } + return ret, nil +} + +// Peek returns the next n bytes without advancing the reader. The returned +// bytes are valid until the next call to Consume. +func (r *StreamReader) Peek(n int) ([]byte, error) { + buf, err := r.ReadN(n) + r.RewindN(len(buf)) + if err != nil { + return buf, err + } + return buf, nil +} + +// Rewind resets the reader to the beginning of the buffered data. +func (r *StreamReader) Rewind() { + r.head = 0 +} + +// RewindN rewinds the reader by n bytes. If n is greater than the current +// buffer, the reader is rewound to the beginning of the buffer. +func (r *StreamReader) RewindN(n int) { + r.head -= min(n, r.head) +} + +// Consume discards up to n bytes of previously read data from the beginning of +// the buffer. Once consumed, that data is no longer available for rewinding. 
+// If n is greater than the current buffer, the buffer is cleared. Consume +// never consume data from the underlying reader. +func (r *StreamReader) Consume(n int) { + n = min(n, len(r.buf)) + r.buf = r.buf[n:] + r.head -= n + r.ttlConsumed += n +} + +// Consumed returns the number of bytes consumed from the input reader. +func (r *StreamReader) Consumed() int { + return r.ttlConsumed +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go index 29574fd6..5f446a4f 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -16,5 +16,5 @@ limitations under the License. // +k8s:openapi-gen=true -// Package version supplies the type for version information collected at build time. -package version // import "k8s.io/apimachinery/pkg/version" +// Package version supplies the type for version information. +package version diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go index 72727b50..6a18f9e9 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/types.go +++ b/vendor/k8s.io/apimachinery/pkg/version/types.go @@ -20,15 +20,25 @@ package version // TODO: Add []string of api versions supported? It's still unclear // how we'll want to distribute that information. 
type Info struct { - Major string `json:"major"` - Minor string `json:"minor"` - GitVersion string `json:"gitVersion"` - GitCommit string `json:"gitCommit"` - GitTreeState string `json:"gitTreeState"` - BuildDate string `json:"buildDate"` - GoVersion string `json:"goVersion"` - Compiler string `json:"compiler"` - Platform string `json:"platform"` + // Major is the major version of the binary version + Major string `json:"major"` + // Minor is the minor version of the binary version + Minor string `json:"minor"` + // EmulationMajor is the major version of the emulation version + EmulationMajor string `json:"emulationMajor,omitempty"` + // EmulationMinor is the minor version of the emulation version + EmulationMinor string `json:"emulationMinor,omitempty"` + // MinCompatibilityMajor is the major version of the minimum compatibility version + MinCompatibilityMajor string `json:"minCompatibilityMajor,omitempty"` + // MinCompatibilityMinor is the minor version of the minimum compatibility version + MinCompatibilityMinor string `json:"minCompatibilityMinor,omitempty"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` } // String returns info as a human-friendly version string. diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go index 7e6bf3fb..5fde5e74 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package watch contains a generic watchable interface, and a fake for // testing code that uses the watch interface. 
-package watch // import "k8s.io/apimachinery/pkg/watch" +package watch diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 42dcac2b..b422ca9f 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -51,6 +51,7 @@ type Reporter interface { // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. type StreamWatcher struct { + logger klog.Logger sync.Mutex source Decoder reporter Reporter @@ -59,8 +60,16 @@ type StreamWatcher struct { } // NewStreamWatcher creates a StreamWatcher from the given decoder. +// +// Contextual logging: NewStreamWatcherWithLogger should be used instead of NewStreamWatcher in code which supports contextual logging. func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { + return NewStreamWatcherWithLogger(klog.Background(), d, r) +} + +// NewStreamWatcherWithLogger creates a StreamWatcher from the given decoder and logger. +func NewStreamWatcherWithLogger(logger klog.Logger, d Decoder, r Reporter) *StreamWatcher { sw := &StreamWatcher{ + logger: logger, source: d, reporter: r, // It's easy for a consumer to add buffering via an extra @@ -98,7 +107,7 @@ func (sw *StreamWatcher) Stop() { // receive reads result from the decoder in a loop and sends down the result channel. 
func (sw *StreamWatcher) receive() { - defer utilruntime.HandleCrash() + defer utilruntime.HandleCrashWithLogger(sw.logger) defer close(sw.result) defer sw.Stop() for { @@ -108,10 +117,10 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + sw.logger.V(1).Info("Unexpected EOF during watch stream event decoding", "err", err) default: if net.IsProbableEOF(err) || net.IsTimeout(err) { - klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) + sw.logger.V(5).Info("Unable to decode an event from the watch stream", "err", err) } else { select { case <-sw.done: diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index ce37fd8c..25145983 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -23,6 +23,7 @@ import ( "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ) // Interface can be implemented by anything that knows how to watch and report changes. @@ -103,29 +104,42 @@ func (w emptyWatch) ResultChan() <-chan Event { // FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. type FakeWatcher struct { + logger klog.Logger result chan Event stopped bool sync.Mutex } +var _ Interface = &FakeWatcher{} + +// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging. func NewFake() *FakeWatcher { - return &FakeWatcher{ - result: make(chan Event), - } + return NewFakeWithOptions(FakeOptions{}) } +// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging. 
func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return NewFakeWithOptions(FakeOptions{ChannelSize: size}) +} + +func NewFakeWithOptions(options FakeOptions) *FakeWatcher { return &FakeWatcher{ - result: make(chan Event, size), + logger: ptr.Deref(options.Logger, klog.Background()), + result: make(chan Event, options.ChannelSize), } } +type FakeOptions struct { + Logger *klog.Logger + ChannelSize int +} + // Stop implements Interface.Stop(). func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.stopped { - klog.V(4).Infof("Stopping fake watcher.") + f.logger.V(4).Info("Stopping fake watcher") close(f.result) f.stopped = true } @@ -176,13 +190,22 @@ func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { // RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. type RaceFreeFakeWatcher struct { + logger klog.Logger result chan Event Stopped bool sync.Mutex } +var _ Interface = &RaceFreeFakeWatcher{} + +// Contextual logging: RaceFreeFakeWatcherWithLogger should be used instead of NewRaceFreeFake in code which supports contextual logging. 
func NewRaceFreeFake() *RaceFreeFakeWatcher { + return NewRaceFreeFakeWithLogger(klog.Background()) +} + +func NewRaceFreeFakeWithLogger(logger klog.Logger) *RaceFreeFakeWatcher { return &RaceFreeFakeWatcher{ + logger: logger, result: make(chan Event, DefaultChanSize), } } @@ -192,7 +215,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - klog.V(4).Infof("Stopping fake watcher.") + f.logger.V(4).Info("Stopping fake watcher") close(f.result) f.Stopped = true } diff --git a/vendor/k8s.io/client-go/features/known_features.go b/vendor/k8s.io/client-go/features/known_features.go index a74f6a83..344d2ebb 100644 --- a/vendor/k8s.io/client-go/features/known_features.go +++ b/vendor/k8s.io/client-go/features/known_features.go @@ -53,6 +53,12 @@ const ( // alpha: v1.30 InformerResourceVersion Feature = "InformerResourceVersion" + // owner: @deads2k + // beta: v1.33 + // + // Refactor informers to deliver watch stream events in order instead of out of order. + InOrderInformers Feature = "InOrderInformers" + // owner: @p0lyn0mial // beta: v1.30 // @@ -73,5 +79,6 @@ var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{ ClientsAllowCBOR: {Default: false, PreRelease: Alpha}, ClientsPreferCBOR: {Default: false, PreRelease: Alpha}, InformerResourceVersion: {Default: false, PreRelease: Alpha}, + InOrderInformers: {Default: true, PreRelease: Beta}, WatchListClient: {Default: false, PreRelease: Beta}, } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index b9945975..486790fa 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -17,4 +17,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io -package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" +package clientauthentication diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go index 94ca35c2..e378b75c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=client.authentication.k8s.io -package v1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1" +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index 22d1c588..6eb6a981 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=client.authentication.k8s.io -package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go index 05e997e1..c3ace745 100644 --- a/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -18,4 +18,4 @@ limitations under the License. // Package version supplies version information collected at build time to // kubernetes components. -package version // import "k8s.io/client-go/pkg/version" +package version diff --git a/vendor/k8s.io/client-go/rest/.mockery.yaml b/vendor/k8s.io/client-go/rest/.mockery.yaml new file mode 100644 index 00000000..e21d7b5b --- /dev/null +++ b/vendor/k8s.io/client-go/rest/.mockery.yaml @@ -0,0 +1,10 @@ +--- +dir: . 
+filename: "mock_{{.InterfaceName | snakecase}}_test.go" +boilerplate-file: ../../../../../hack/boilerplate/boilerplate.generatego.txt +outpkg: rest +with-expecter: true +packages: + k8s.io/client-go/rest: + interfaces: + BackoffManager: diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 159caa13..a085c334 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -93,7 +93,7 @@ type RESTClient struct { content requestClientContentConfigProvider // creates BackoffManager that is passed to requests. - createBackoffMgr func() BackoffManager + createBackoffMgr func() BackoffManagerWithContext // rateLimiter is shared among all requests created by this client unless specifically // overridden. @@ -101,7 +101,7 @@ type RESTClient struct { // warningHandler is shared among all requests created by this client. // If not set, defaultWarningHandler is used. - warningHandler WarningHandler + warningHandler WarningHandlerWithContext // Set specific behavior of the client. If not set http.DefaultClient will be used. Client *http.Client @@ -178,7 +178,7 @@ func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { // readExpBackoffConfig handles the internal logic of determining what the // backoff policy is. By default if no information is available, NoBackoff. // TODO Generalize this see #17727 . -func readExpBackoffConfig() BackoffManager { +func readExpBackoffConfig() BackoffManagerWithContext { backoffBase := os.Getenv(envBackoffBase) backoffDuration := os.Getenv(envBackoffDuration) diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index f2e813d0..82d4f713 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -129,10 +129,23 @@ type Config struct { RateLimiter flowcontrol.RateLimiter // WarningHandler handles warnings in server responses. - // If not set, the default warning handler is used. 
- // See documentation for SetDefaultWarningHandler() for details. + // If this and WarningHandlerWithContext are not set, the + // default warning handler is used. If both are set, + // WarningHandlerWithContext is used. + // + // See documentation for [SetDefaultWarningHandler] for details. + // + //logcheck:context // WarningHandlerWithContext should be used instead of WarningHandler in code which supports contextual logging. WarningHandler WarningHandler + // WarningHandlerWithContext handles warnings in server responses. + // If this and WarningHandler are not set, the + // default warning handler is used. If both are set, + // WarningHandlerWithContext is used. + // + // See documentation for [SetDefaultWarningHandler] for details. + WarningHandlerWithContext WarningHandlerWithContext + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. Timeout time.Duration @@ -381,12 +394,27 @@ func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RES } restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient) - if err == nil && config.WarningHandler != nil { - restClient.warningHandler = config.WarningHandler - } + maybeSetWarningHandler(restClient, config.WarningHandler, config.WarningHandlerWithContext) return restClient, err } +// maybeSetWarningHandler sets the handlerWithContext if non-nil, +// otherwise the handler with a wrapper if non-nil, +// and does nothing if both are nil. +// +// May be called for a nil client. +func maybeSetWarningHandler(c *RESTClient, handler WarningHandler, handlerWithContext WarningHandlerWithContext) { + if c == nil { + return + } + switch { + case handlerWithContext != nil: + c.warningHandler = handlerWithContext + case handler != nil: + c.warningHandler = warningLoggerNopContext{l: handler} + } +} + // UnversionedRESTClientFor is the same as RESTClientFor, except that it allows // the config.Version to be empty. 
func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { @@ -448,9 +476,7 @@ func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Cl } restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient) - if err == nil && config.WarningHandler != nil { - restClient.warningHandler = config.WarningHandler - } + maybeSetWarningHandler(restClient, config.WarningHandler, config.WarningHandlerWithContext) return restClient, err } @@ -532,6 +558,7 @@ func InClusterConfig() (*Config, error) { tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { + //nolint:logcheck // The decision to log this instead of returning an error goes back to ~2016. It's part of the client-go API now, so not changing it just to support contextual logging. klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile @@ -616,15 +643,16 @@ func AnonymousClientConfig(config *Config) *Config { CAData: config.TLSClientConfig.CAData, NextProtos: config.TLSClientConfig.NextProtos, }, - RateLimiter: config.RateLimiter, - WarningHandler: config.WarningHandler, - UserAgent: config.UserAgent, - DisableCompression: config.DisableCompression, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - Dial: config.Dial, - Proxy: config.Proxy, + RateLimiter: config.RateLimiter, + WarningHandler: config.WarningHandler, + WarningHandlerWithContext: config.WarningHandlerWithContext, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, + Proxy: config.Proxy, } } @@ -658,17 +686,18 @@ func CopyConfig(config *Config) *Config { CAData: config.TLSClientConfig.CAData, NextProtos: config.TLSClientConfig.NextProtos, }, - UserAgent: config.UserAgent, - DisableCompression: config.DisableCompression, - Transport: 
config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - RateLimiter: config.RateLimiter, - WarningHandler: config.WarningHandler, - Timeout: config.Timeout, - Dial: config.Dial, - Proxy: config.Proxy, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + WarningHandler: config.WarningHandler, + WarningHandlerWithContext: config.WarningHandlerWithContext, + Timeout: config.Timeout, + Dial: config.Dial, + Proxy: config.Proxy, } if config.ExecProvider != nil && config.ExecProvider.Config != nil { c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject() diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index ae5cbdc2..f7a4e4f3 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -21,8 +21,6 @@ import ( "net/http" "sync" - "k8s.io/klog/v2" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -65,7 +63,10 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { if _, found := plugins[name]; found { return fmt.Errorf("auth Provider Plugin %q was registered twice", name) } - klog.V(4).Infof("Registered Auth Provider Plugin %q", name) + // RegisterAuthProviderPlugin gets called during the init phase before + // logging is initialized and therefore should not emit logs. If you + // need this message for debugging something, then uncomment it. 
+ // klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0ec90ad1..1eb2f9b4 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -54,7 +54,7 @@ import ( "k8s.io/utils/clock" ) -var ( +const ( // longThrottleLatency defines threshold for logging requests. All requests being // throttled (via the provided rateLimiter) for more than longThrottleLatency will // be logged. @@ -103,10 +103,10 @@ type Request struct { contentConfig ClientContentConfig contentTypeNotSet bool - warningHandler WarningHandler + warningHandler WarningHandlerWithContext rateLimiter flowcontrol.RateLimiter - backoff BackoffManager + backoff BackoffManagerWithContext timeout time.Duration maxRetries int @@ -136,7 +136,7 @@ type Request struct { // NewRequest creates a new request helper object for accessing runtime.Objects on a server. func NewRequest(c *RESTClient) *Request { - var backoff BackoffManager + var backoff BackoffManagerWithContext if c.createBackoffMgr != nil { backoff = c.createBackoffMgr() } @@ -259,20 +259,47 @@ func (r *Request) Resource(resource string) *Request { } // BackOff sets the request's backoff manager to the one specified, -// or defaults to the stub implementation if nil is provided +// or defaults to the stub implementation if nil is provided. +// +// Deprecated: BackoffManager.Sleep ignores the caller's context. Use BackOffWithContext and BackoffManagerWithContext instead. func (r *Request) BackOff(manager BackoffManager) *Request { if manager == nil { r.backoff = &NoBackoff{} return r } + r.backoff = &backoffManagerNopContext{BackoffManager: manager} + return r +} + +// BackOffWithContext sets the request's backoff manager to the one specified, +// or defaults to the stub implementation if nil is provided. 
+func (r *Request) BackOffWithContext(manager BackoffManagerWithContext) *Request { + if manager == nil { + r.backoff = &NoBackoff{} + return r + } + r.backoff = manager return r } // WarningHandler sets the handler this client uses when warning headers are encountered. -// If set to nil, this client will use the default warning handler (see SetDefaultWarningHandler). +// If set to nil, this client will use the default warning handler (see [SetDefaultWarningHandler]). +// +//logcheck:context // WarningHandlerWithContext should be used instead of WarningHandler in code which supports contextual logging. func (r *Request) WarningHandler(handler WarningHandler) *Request { + if handler == nil { + r.warningHandler = nil + return r + } + r.warningHandler = warningLoggerNopContext{l: handler} + return r +} + +// WarningHandlerWithContext sets the handler this client uses when warning headers are encountered. +// If set to nil, this client will use the default warning handler (see [SetDefaultWarningHandlerWithContext]). +func (r *Request) WarningHandlerWithContext(handler WarningHandlerWithContext) *Request { r.warningHandler = handler return r } @@ -649,21 +676,17 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err } latency := time.Since(now) - var message string - switch { - case len(retryInfo) > 0: - message = fmt.Sprintf("Waited for %v, %s - request: %s:%s", latency, retryInfo, r.verb, r.URL().String()) - default: - message = fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s:%s", latency, r.verb, r.URL().String()) - } - if latency > longThrottleLatency { - klog.V(3).Info(message) - } - if latency > extraLongThrottleLatency { - // If the rate limiter latency is very high, the log message should be printed at a higher log level, - // but we use a throttled logger to prevent spamming. 
- globalThrottledLogger.Infof("%s", message) + if retryInfo == "" { + retryInfo = "client-side throttling, not priority and fairness" + } + klog.FromContext(ctx).V(3).Info("Waited before sending request", "delay", latency, "reason", retryInfo, "verb", r.verb, "URL", r.URL()) + + if latency > extraLongThrottleLatency { + // If the rate limiter latency is very high, the log message should be printed at a higher log level, + // but we use a throttled logger to prevent spamming. + globalThrottledLogger.info(klog.FromContext(ctx), "Waited before sending request", "delay", latency, "reason", retryInfo, "verb", r.verb, "URL", r.URL()) + } } metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency) @@ -675,7 +698,7 @@ func (r *Request) tryThrottle(ctx context.Context) error { } type throttleSettings struct { - logLevel klog.Level + logLevel int minLogInterval time.Duration lastLogTime time.Time @@ -700,9 +723,9 @@ var globalThrottledLogger = &throttledLogger{ }, } -func (b *throttledLogger) attemptToLog() (klog.Level, bool) { +func (b *throttledLogger) attemptToLog(logger klog.Logger) (int, bool) { for _, setting := range b.settings { - if bool(klog.V(setting.logLevel).Enabled()) { + if bool(logger.V(setting.logLevel).Enabled()) { // Return early without write locking if possible. if func() bool { setting.lock.RLock() @@ -724,9 +747,9 @@ func (b *throttledLogger) attemptToLog() (klog.Level, bool) { // Infof will write a log message at each logLevel specified by the receiver's throttleSettings // as long as it hasn't written a log message more recently than minLogInterval. -func (b *throttledLogger) Infof(message string, args ...interface{}) { - if logLevel, ok := b.attemptToLog(); ok { - klog.V(logLevel).Infof(message, args...) +func (b *throttledLogger) info(logger klog.Logger, message string, kv ...any) { + if logLevel, ok := b.attemptToLog(logger); ok { + logger.V(logLevel).Info(message, kv...) 
} } @@ -739,7 +762,7 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) { if r.body == nil { - logBody(ctx, 2, "Request Body", r.bodyBytes) + logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes) } // We specifically don't want to rate limit watches, so we @@ -776,7 +799,7 @@ func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.D resp, err := client.Do(req) retry.After(ctx, r, resp, err) if err == nil && resp.StatusCode == http.StatusOK { - return r.newStreamWatcher(resp) + return r.newStreamWatcher(ctx, resp) } done, transformErr := func() (bool, error) { @@ -898,7 +921,7 @@ func (r WatchListResult) Into(obj runtime.Object) error { // to see what parameters are currently required. func (r *Request) WatchList(ctx context.Context) WatchListResult { if r.body == nil { - logBody(ctx, 2, "Request Body", r.bodyBytes) + logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes) } if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { @@ -969,23 +992,24 @@ func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negoti } } -func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, runtime.Decoder, error) { +func (r *Request) newStreamWatcher(ctx context.Context, resp *http.Response) (watch.Interface, runtime.Decoder, error) { contentType := resp.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { - klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err) + klog.FromContext(ctx).V(4).Info("Unexpected content type from the server", "contentType", contentType, "err", err) } objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params) if err != nil { return nil, nil, err } - handleWarnings(resp.Header, r.warningHandler) + handleWarnings(ctx, 
resp.Header, r.warningHandler) frameReader := framer.NewFrameReader(resp.Body) watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer) - return watch.NewStreamWatcher( + return watch.NewStreamWatcherWithLogger( + klog.FromContext(ctx), restclientwatch.NewDecoder(watchEventDecoder, objectDecoder), // use 500 to indicate that the cause of the error is unknown - other error codes // are more specific to HTTP interactions, and set a reason @@ -1031,7 +1055,7 @@ func sanitize(req *Request, resp *http.Response, err error) (string, string) { // If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { if r.body == nil { - logBody(ctx, 2, "Request Body", r.bodyBytes) + logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes) } if r.err != nil { @@ -1067,7 +1091,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { switch { case (resp.StatusCode >= 200) && (resp.StatusCode < 300): - handleWarnings(resp.Header, r.warningHandler) + handleWarnings(ctx, resp.Header, r.warningHandler) return resp.Body, nil default: @@ -1175,7 +1199,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp }() if r.err != nil { - klog.V(4).Infof("Error in request: %v", r.err) + klog.FromContext(ctx).V(4).Info("Error in request", "err", r.err) return r.err } @@ -1267,8 +1291,9 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp // - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // - http.Client.Do errors are returned directly. 
func (r *Request) Do(ctx context.Context) Result { + logger := klog.FromContext(ctx) if r.body == nil { - logBody(ctx, 2, "Request Body", r.bodyBytes) + logBody(logger, 2, "Request Body", r.bodyBytes) } var result Result @@ -1276,7 +1301,7 @@ func (r *Request) Do(ctx context.Context) Result { result = r.transformResponse(ctx, resp, req) }) if err != nil { - return Result{err: err} + return Result{err: err, logger: logger} } if result.err == nil || len(result.body) > 0 { metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body))) @@ -1286,14 +1311,15 @@ func (r *Request) Do(ctx context.Context) Result { // DoRaw executes the request but does not process the response body. func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { + logger := klog.FromContext(ctx) if r.body == nil { - logBody(ctx, 2, "Request Body", r.bodyBytes) + logBody(logger, 2, "Request Body", r.bodyBytes) } var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { result.body, result.err = io.ReadAll(resp.Body) - logBody(ctx, 2, "Response Body", result.body) + logBody(logger, 2, "Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { result.err = r.transformUnstructuredResponseError(resp, req, result.body) } @@ -1309,6 +1335,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { // transformResponse converts an API response into a structured API object func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result { + logger := klog.FromContext(ctx) var body []byte if resp.Body != nil { data, err := io.ReadAll(resp.Body) @@ -1323,22 +1350,24 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. 
- klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + logger.V(2).Info("Stream error when reading response body, may be caused by closed connection", "err", err) streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err) return Result{ - err: streamErr, + err: streamErr, + logger: logger, } default: - klog.Errorf("Unexpected error when reading response body: %v", err) + logger.Error(err, "Unexpected error when reading response body") unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %w", err) return Result{ - err: unexpectedErr, + err: unexpectedErr, + logger: logger, } } } // Call depth is tricky. This one is okay for Do and DoRaw. - logBody(ctx, 7, "Response Body", body) + logBody(logger, 7, "Response Body", body) // verify the content type is accurate var decoder runtime.Decoder @@ -1350,7 +1379,7 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re var err error mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { - return Result{err: errors.NewInternalError(err)} + return Result{err: errors.NewInternalError(err), logger: logger} } decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params) if err != nil { @@ -1359,13 +1388,14 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re case resp.StatusCode == http.StatusSwitchingProtocols: // no-op, we've been upgraded case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: - return Result{err: r.transformUnstructuredResponseError(resp, req, body)} + return Result{err: r.transformUnstructuredResponseError(resp, req, body), logger: logger} } return Result{ body: body, contentType: contentType, statusCode: resp.StatusCode, - warnings: handleWarnings(resp.Header, r.warningHandler), + warnings: handleWarnings(ctx, 
resp.Header, r.warningHandler), + logger: logger, } } } @@ -1384,7 +1414,8 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re statusCode: resp.StatusCode, decoder: decoder, err: err, - warnings: handleWarnings(resp.Header, r.warningHandler), + warnings: handleWarnings(ctx, resp.Header, r.warningHandler), + logger: logger, } } @@ -1393,7 +1424,8 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re contentType: contentType, statusCode: resp.StatusCode, decoder: decoder, - warnings: handleWarnings(resp.Header, r.warningHandler), + warnings: handleWarnings(ctx, resp.Header, r.warningHandler), + logger: logger, } } @@ -1421,8 +1453,7 @@ func truncateBody(logger klog.Logger, body string) string { // whether the body is printable. // // It needs to be called by all functions which send or receive the data. -func logBody(ctx context.Context, callDepth int, prefix string, body []byte) { - logger := klog.FromContext(ctx) +func logBody(logger klog.Logger, callDepth int, prefix string, body []byte) { if loggerV := logger.V(8); loggerV.Enabled() { loggerV := loggerV.WithCallDepth(callDepth) if bytes.IndexFunc(body, func(r rune) bool { @@ -1524,6 +1555,7 @@ type Result struct { contentType string err error statusCode int + logger klog.Logger decoder runtime.Decoder } @@ -1629,7 +1661,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + r.logger.V(5).Info("Body was not decodable (unable to check for Status)", "err", err) return r.err } switch t := out.(type) { diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go index 2f9962d7..5b7b4e21 100644 --- a/vendor/k8s.io/client-go/rest/urlbackoff.go +++ 
b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -17,6 +17,8 @@ limitations under the License. package rest import ( + "context" + "fmt" "net/url" "time" @@ -32,12 +34,24 @@ import ( var serverIsOverloadedSet = sets.NewInt(429) var maxResponseCode = 499 +//go:generate mockery + +// Deprecated: BackoffManager.Sleep ignores the caller's context. Use BackoffManagerWithContext instead. type BackoffManager interface { - UpdateBackoff(actualUrl *url.URL, err error, responseCode int) - CalculateBackoff(actualUrl *url.URL) time.Duration + UpdateBackoff(actualURL *url.URL, err error, responseCode int) + CalculateBackoff(actualURL *url.URL) time.Duration Sleep(d time.Duration) } +type BackoffManagerWithContext interface { + UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) + CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration + SleepWithContext(ctx context.Context, d time.Duration) +} + +var _ BackoffManager = &URLBackoff{} +var _ BackoffManagerWithContext = &URLBackoff{} + // URLBackoff struct implements the semantics on top of Backoff which // we need for URL specific exponential backoff. type URLBackoff struct { @@ -49,11 +63,19 @@ type URLBackoff struct { type NoBackoff struct { } -func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { +func (n *NoBackoff) UpdateBackoff(actualURL *url.URL, err error, responseCode int) { // do nothing. } -func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { +func (n *NoBackoff) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) { + // do nothing. 
+} + +func (n *NoBackoff) CalculateBackoff(actualURL *url.URL) time.Duration { + return 0 * time.Second +} + +func (n *NoBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration { return 0 * time.Second } @@ -61,10 +83,21 @@ func (n *NoBackoff) Sleep(d time.Duration) { time.Sleep(d) } +func (n *NoBackoff) SleepWithContext(ctx context.Context, d time.Duration) { + if d == 0 { + return + } + t := time.NewTimer(d) + defer t.Stop() + select { + case <-ctx.Done(): + case <-t.C: + } +} + // Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - klog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,32 +109,74 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. host, err := url.Parse(rawurl.String()) if err != nil { - klog.V(4).Infof("Error extracting url: %v", rawurl) - panic("bad url!") + panic(fmt.Sprintf("Error parsing bad URL %q: %v", rawurl, err)) } return host.Host } // UpdateBackoff updates backoff metadata -func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { +func (b *URLBackoff) UpdateBackoff(actualURL *url.URL, err error, responseCode int) { + b.UpdateBackoffWithContext(context.Background(), actualURL, err, responseCode) +} + +// UpdateBackoffWithContext updates backoff metadata +func (b *URLBackoff) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) { // range for retry counts that we store is [0,13] if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) { - b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) + b.Backoff.Next(b.baseUrlKey(actualURL), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - klog.V(4).Infof("Client is returning errors: code %v, error %v", 
responseCode, err) + klog.FromContext(ctx).V(4).Info("Client is returning errors", "code", responseCode, "err", err) } //If we got this far, there is no backoff required for this URL anymore. - b.Backoff.Reset(b.baseUrlKey(actualUrl)) + b.Backoff.Reset(b.baseUrlKey(actualURL)) } // CalculateBackoff takes a url and back's off exponentially, // based on its knowledge of existing failures. -func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { - return b.Backoff.Get(b.baseUrlKey(actualUrl)) +func (b *URLBackoff) CalculateBackoff(actualURL *url.URL) time.Duration { + return b.Backoff.Get(b.baseUrlKey(actualURL)) +} + +// CalculateBackoffWithContext takes a url and back's off exponentially, +// based on its knowledge of existing failures. +func (b *URLBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration { + return b.Backoff.Get(b.baseUrlKey(actualURL)) } func (b *URLBackoff) Sleep(d time.Duration) { b.Backoff.Clock.Sleep(d) } + +func (b *URLBackoff) SleepWithContext(ctx context.Context, d time.Duration) { + if d == 0 { + return + } + t := b.Backoff.Clock.NewTimer(d) + defer t.Stop() + select { + case <-ctx.Done(): + case <-t.C(): + } +} + +// backoffManagerNopContext wraps a BackoffManager and adds the *WithContext methods. 
+type backoffManagerNopContext struct { + BackoffManager +} + +var _ BackoffManager = &backoffManagerNopContext{} +var _ BackoffManagerWithContext = &backoffManagerNopContext{} + +func (b *backoffManagerNopContext) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) { + b.UpdateBackoff(actualURL, err, responseCode) +} + +func (b *backoffManagerNopContext) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration { + return b.CalculateBackoff(actualURL) +} + +func (b *backoffManagerNopContext) SleepWithContext(ctx context.Context, d time.Duration) { + b.Sleep(d) +} diff --git a/vendor/k8s.io/client-go/rest/warnings.go b/vendor/k8s.io/client-go/rest/warnings.go index ad493659..713b2d64 100644 --- a/vendor/k8s.io/client-go/rest/warnings.go +++ b/vendor/k8s.io/client-go/rest/warnings.go @@ -17,6 +17,7 @@ limitations under the License. package rest import ( + "context" "fmt" "io" "net/http" @@ -33,8 +34,15 @@ type WarningHandler interface { HandleWarningHeader(code int, agent string, text string) } +// WarningHandlerWithContext is an interface for handling warning headers with +// support for contextual logging. +type WarningHandlerWithContext interface { + // HandleWarningHeaderWithContext is called with the warn code, agent, and text when a warning header is countered. + HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, text string) +} + var ( - defaultWarningHandler WarningHandler = WarningLogger{} + defaultWarningHandler WarningHandlerWithContext = WarningLogger{} defaultWarningHandlerLock sync.RWMutex ) @@ -43,33 +51,68 @@ var ( // - NoWarnings suppresses warnings. // - WarningLogger logs warnings. // - NewWarningWriter() outputs warnings to the provided writer. +// +// logcheck:context // SetDefaultWarningHandlerWithContext should be used instead of SetDefaultWarningHandler in code which supports contextual logging. 
func SetDefaultWarningHandler(l WarningHandler) { + if l == nil { + SetDefaultWarningHandlerWithContext(nil) + return + } + SetDefaultWarningHandlerWithContext(warningLoggerNopContext{l: l}) +} + +// SetDefaultWarningHandlerWithContext is a variant of [SetDefaultWarningHandler] which supports contextual logging. +func SetDefaultWarningHandlerWithContext(l WarningHandlerWithContext) { defaultWarningHandlerLock.Lock() defer defaultWarningHandlerLock.Unlock() defaultWarningHandler = l } -func getDefaultWarningHandler() WarningHandler { + +func getDefaultWarningHandler() WarningHandlerWithContext { defaultWarningHandlerLock.RLock() defer defaultWarningHandlerLock.RUnlock() l := defaultWarningHandler return l } -// NoWarnings is an implementation of WarningHandler that suppresses warnings. +type warningLoggerNopContext struct { + l WarningHandler +} + +func (w warningLoggerNopContext) HandleWarningHeaderWithContext(_ context.Context, code int, agent string, message string) { + w.l.HandleWarningHeader(code, agent, message) +} + +// NoWarnings is an implementation of [WarningHandler] and [WarningHandlerWithContext] that suppresses warnings. 
type NoWarnings struct{} func (NoWarnings) HandleWarningHeader(code int, agent string, message string) {} +func (NoWarnings) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) { +} + +var _ WarningHandler = NoWarnings{} +var _ WarningHandlerWithContext = NoWarnings{} -// WarningLogger is an implementation of WarningHandler that logs code 299 warnings +// WarningLogger is an implementation of [WarningHandler] and [WarningHandlerWithContext] that logs code 299 warnings type WarningLogger struct{} func (WarningLogger) HandleWarningHeader(code int, agent string, message string) { if code != 299 || len(message) == 0 { return } - klog.Warning(message) + klog.Background().Info("Warning: " + message) } +func (WarningLogger) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) { + if code != 299 || len(message) == 0 { + return + } + klog.FromContext(ctx).Info("Warning: " + message) +} + +var _ WarningHandler = WarningLogger{} +var _ WarningHandlerWithContext = WarningLogger{} + type warningWriter struct { // out is the writer to output warnings to out io.Writer @@ -134,14 +177,14 @@ func (w *warningWriter) WarningCount() int { return w.writtenCount } -func handleWarnings(headers http.Header, handler WarningHandler) []net.WarningHeader { +func handleWarnings(ctx context.Context, headers http.Header, handler WarningHandlerWithContext) []net.WarningHeader { if handler == nil { handler = getDefaultWarningHandler() } warnings, _ := net.ParseWarningHeaders(headers["Warning"]) for _, warning := range warnings { - handler.HandleWarningHeader(warning.Code, warning.Agent, warning.Text) + handler.HandleWarningHeaderWithContext(ctx, warning.Code, warning.Agent, warning.Text) } return warnings } diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go index eaaadc6a..e211c39d 100644 --- a/vendor/k8s.io/client-go/rest/with_retry.go +++ 
b/vendor/k8s.io/client-go/rest/with_retry.go @@ -209,18 +209,18 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error { // we do a backoff sleep before the first attempt is made, // (preserving current behavior). if request.backoff != nil { - request.backoff.Sleep(request.backoff.CalculateBackoff(url)) + request.backoff.SleepWithContext(ctx, request.backoff.CalculateBackoffWithContext(ctx, url)) } return nil } // if we are here, we have made attempt(s) at least once before. if request.backoff != nil { - delay := request.backoff.CalculateBackoff(url) + delay := request.backoff.CalculateBackoffWithContext(ctx, url) if r.retryAfter.Wait > delay { delay = r.retryAfter.Wait } - request.backoff.Sleep(delay) + request.backoff.SleepWithContext(ctx, delay) } // We are retrying the request that we already send to @@ -231,7 +231,7 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error { return err } - klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", r.retryAfter.Wait, r.retryAfter.Attempt, request.URL().String()) + klog.FromContext(ctx).V(4).Info("Got a Retry-After response", "delay", r.retryAfter.Wait, "attempt", r.retryAfter.Attempt, "url", request.URL()) return nil } @@ -258,9 +258,9 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp if request.c.base != nil { if err != nil { - request.backoff.UpdateBackoff(request.URL(), err, 0) + request.backoff.UpdateBackoffWithContext(ctx, request.URL(), err, 0) } else { - request.backoff.UpdateBackoff(request.URL(), err, resp.StatusCode) + request.backoff.UpdateBackoffWithContext(ctx, request.URL(), err, resp.StatusCode) } } } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index fd913a30..5871575a 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package -package api // import "k8s.io/client-go/tools/clientcmd/api" +package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index 9e483e9d..3ccdebc1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:defaulter-gen=Kind -package v1 // import "k8s.io/client-go/tools/clientcmd/api/v1" +package v1 diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go index 424311ee..c07ace6a 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/doc.go @@ -34,4 +34,4 @@ Sample usage from merged .kubeconfig files (local directory, home directory) client, err := metav1.New(config) // ... */ -package clientcmd // import "k8s.io/client-go/tools/clientcmd" +package clientcmd diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 7c7f1b33..b8dd8661 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -28,6 +28,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/metrics" + "k8s.io/klog/v2" ) // TlsTransportCache caches TLS http.RoundTrippers different configurations. 
The @@ -116,10 +117,13 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { // If we use are reloading files, we need to handle certificate rotation properly // TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true if config.TLS.ReloadTLSFiles && tlsConfig != nil && tlsConfig.GetClientCertificate != nil { - dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial) + // The TLS cache is a singleton, so sharing the same name for all of its + // background activity seems okay. + logger := klog.Background().WithName("tls-transport-cache") + dynamicCertDialer := certRotatingDialer(logger, tlsConfig.GetClientCertificate, dial) tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate dial = dynamicCertDialer.connDialer.DialContext - go dynamicCertDialer.Run(DialerStopCh) + go dynamicCertDialer.run(DialerStopCh) } proxy := http.ProxyFromEnvironment diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go index e76f6581..e343f42b 100644 --- a/vendor/k8s.io/client-go/transport/cert_rotation.go +++ b/vendor/k8s.io/client-go/transport/cert_rotation.go @@ -19,7 +19,6 @@ package transport import ( "bytes" "crypto/tls" - "fmt" "reflect" "sync" "time" @@ -40,6 +39,7 @@ var CertCallbackRefreshDuration = 5 * time.Minute type reloadFunc func(*tls.CertificateRequestInfo) (*tls.Certificate, error) type dynamicClientCert struct { + logger klog.Logger clientCert *tls.Certificate certMtx sync.RWMutex @@ -50,8 +50,9 @@ type dynamicClientCert struct { queue workqueue.TypedRateLimitingInterface[string] } -func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { +func certRotatingDialer(logger klog.Logger, reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { d := &dynamicClientCert{ + logger: logger, reload: reload, connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)), queue: 
workqueue.NewTypedRateLimitingQueueWithConfig( @@ -88,7 +89,7 @@ func (c *dynamicClientCert) loadClientCert() (*tls.Certificate, error) { return cert, nil } - klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials") + c.logger.V(1).Info("Certificate rotation detected, shutting down client connections to start using new credentials") c.connDialer.CloseAll() return cert, nil @@ -133,12 +134,12 @@ func byteMatrixEqual(left, right [][]byte) bool { } // run starts the controller and blocks until stopCh is closed. -func (c *dynamicClientCert) Run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() +func (c *dynamicClientCert) run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrashWithLogger(c.logger) defer c.queue.ShutDown() - klog.V(3).Infof("Starting client certificate rotation controller") - defer klog.V(3).Infof("Shutting down client certificate rotation controller") + c.logger.V(3).Info("Starting client certificate rotation controller") + defer c.logger.V(3).Info("Shutting down client certificate rotation controller") go wait.Until(c.runWorker, time.Second, stopCh) @@ -168,7 +169,7 @@ func (c *dynamicClientCert) processNextWorkItem() bool { return true } - utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + utilruntime.HandleErrorWithLogger(c.logger, err, "Loading client cert failed", "key", dsKey) c.queue.AddRateLimited(dsKey) return true diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 52fefb53..39fcebd9 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -21,10 +21,12 @@ import ( "fmt" "net/http" "net/http/httptrace" + "sort" "strings" "sync" "time" + "github.com/go-logr/logr" "golang.org/x/oauth2" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -68,19 +70,16 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) 
(http.RoundTrip return rt, nil } -// DebugWrappers wraps a round tripper and logs based on the current log level. +// DebugWrappers potentially wraps a round tripper with a wrapper that logs +// based on the log level in the context of each individual request. +// +// At the moment, wrapping depends on the global log verbosity and is done +// if that verbosity is >= 6. This may change in the future. func DebugWrappers(rt http.RoundTripper) http.RoundTripper { - switch { - case bool(klog.V(9).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugURLTiming, DebugDetailedTiming, DebugResponseHeaders) - case bool(klog.V(8).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus, DebugResponseHeaders) - case bool(klog.V(7).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus) - case bool(klog.V(6).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugURLTiming) + //nolint:logcheck // The actual logging is done with a different logger, so only checking here is okay. + if klog.V(6).Enabled() { + rt = NewDebuggingRoundTripper(rt, DebugByContext) } - return rt } @@ -380,14 +379,17 @@ func (r *requestInfo) toCurl() string { } } - return fmt.Sprintf("curl -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL) + // Newline at the end makes this look better in the text log output (the + // only usage of this method) because it becomes a multi-line string with + // no quoting. 
+ return fmt.Sprintf("curl -v -X%s %s '%s'\n", r.RequestVerb, headers, r.RequestURL) } // debuggingRoundTripper will display information about the requests passing // through it based on what is configured type debuggingRoundTripper struct { delegatedRoundTripper http.RoundTripper - levels map[DebugLevel]bool + levels int } var _ utilnet.RoundTripperWrapper = &debuggingRoundTripper{} @@ -412,6 +414,26 @@ const ( DebugResponseHeaders // DebugDetailedTiming will add to the debug output the duration of the HTTP requests events. DebugDetailedTiming + // DebugByContext will add any of the above depending on the verbosity of the per-request logger obtained from the requests context. + // + // Can be combined in NewDebuggingRoundTripper with some of the other options, in which case the + // debug roundtripper will always log what is requested there plus the information that gets + // enabled by the context's log verbosity. + DebugByContext +) + +// Different log levels include different sets of information. +// +// Not exported because the exact content of log messages is not part +// of of the package API. +const ( + levelsV6 = (1 << DebugURLTiming) + // Logging *less* information for the response at level 7 compared to 6 replicates prior behavior: + // https://github.com/kubernetes/kubernetes/blob/2b472fe4690c83a2b343995f88050b2a3e9ff0fa/staging/src/k8s.io/client-go/transport/round_trippers.go#L79 + // Presumably that was done because verb and URL are already in the request log entry. 
+ levelsV7 = (1 << DebugJustURL) | (1 << DebugRequestHeaders) | (1 << DebugResponseStatus) + levelsV8 = (1 << DebugJustURL) | (1 << DebugRequestHeaders) | (1 << DebugResponseStatus) | (1 << DebugResponseHeaders) + levelsV9 = (1 << DebugCurlCommand) | (1 << DebugURLTiming) | (1 << DebugDetailedTiming) | (1 << DebugResponseHeaders) ) // NewDebuggingRoundTripper allows to display in the logs output debug information @@ -419,10 +441,9 @@ const ( func NewDebuggingRoundTripper(rt http.RoundTripper, levels ...DebugLevel) http.RoundTripper { drt := &debuggingRoundTripper{ delegatedRoundTripper: rt, - levels: make(map[DebugLevel]bool, len(levels)), } for _, v := range levels { - drt.levels[v] = true + drt.levels |= 1 << v } return drt } @@ -464,27 +485,51 @@ func maskValue(key string, value string) string { } func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + logger := klog.FromContext(req.Context()) + levels := rt.levels + + // When logging depends on the context, it uses the verbosity of the per-context logger + // and a hard-coded mapping of verbosity to debug details. Otherwise all messages + // are logged as V(0). + if levels&(1< 0 { + logger.Info("Request", kvs...) } startTime := time.Now() - if rt.levels[DebugDetailedTiming] { + if levels&(1< 0 { + logger.Info("Response", kvs...) + } + + return response, err +} - if rt.levels[DebugResponseStatus] { - klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) +// headerMap formats headers sorted and across multiple lines with no quoting +// when using string output and as JSON when using zapr. +type headersMap http.Header + +// newHeadersMap masks all sensitive values. This has to be done before +// passing the map to a logger because while in practice all loggers +// either use String or MarshalLog, that is not guaranteed. 
+func newHeadersMap(header http.Header) headersMap { + h := make(headersMap, len(header)) + for key, values := range header { + maskedValues := make([]string, 0, len(values)) + for _, value := range values { + maskedValues = append(maskedValues, maskValue(key, value)) + } + h[key] = maskedValues } - if rt.levels[DebugResponseHeaders] { - klog.Info("Response Headers:") - for key, values := range reqInfo.ResponseHeaders { - for _, value := range values { - klog.Infof(" %s: %s", key, value) - } + return h +} + +var _ fmt.Stringer = headersMap{} +var _ logr.Marshaler = headersMap{} + +func (h headersMap) String() string { + // The fixed size typically avoids memory allocations when it is large enough. + keys := make([]string, 0, 20) + for key := range h { + keys = append(keys, key) + } + sort.Strings(keys) + var buffer strings.Builder + for _, key := range keys { + for _, value := range h[key] { + _, _ = buffer.WriteString(key) + _, _ = buffer.WriteString(": ") + _, _ = buffer.WriteString(value) + _, _ = buffer.WriteString("\n") } } + return buffer.String() +} - return response, err +func (h headersMap) MarshalLog() any { + return map[string][]string(h) } func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index 8e312800..469dd817 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -182,7 +182,10 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - klog.Errorf("Unable to rotate token: %v", err) + // Not using a caller-provided logger isn't ideal, but impossible to fix + // without new APIs that go up all the way to HTTPWrappersForConfig. + // This is currently deemed not worth changing (too much effort, not enough benefit). 
+ klog.TODO().Error(err, "Unable to rotate token") return ts.tok, nil } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 4770331a..8fdcc570 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -353,7 +353,7 @@ func tryCancelRequest(rt http.RoundTripper, req *http.Request) { case utilnet.RoundTripperWrapper: tryCancelRequest(rt.WrappedRoundTripper(), req) default: - klog.Warningf("Unable to cancel request for %T", rt) + klog.FromContext(req.Context()).Info("Warning: unable to cancel request", "roundTripperType", fmt.Sprintf("%T", rt)) } } diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 91e17127..12204612 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -90,11 +90,37 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro return x509.ParseCertificate(certDERBytes) } +// SelfSignedCertKeyOptions contains configuration parameters for generating self-signed certificates. +type SelfSignedCertKeyOptions struct { + // Host is required, and identifies the host of the serving certificate. Can be a DNS name or IP address. + Host string + // AlternateIPs is optional, and identifies additional IPs the serving certificate should be valid for. + AlternateIPs []net.IP + // AlternateDNS is optional, and identifies additional DNS names the serving certificate should be valid for. + AlternateDNS []string + + // MaxAge controls the duration of the issued certificate. + // Defaults to 1 year if unset. + // Ignored if FixtureDirectory is set. + MaxAge time.Duration + + // FixtureDirectory is intended for use in tests. + // If non-empty, it is a directory path which can contain pre-generated certs. 
The format is: + // _-_-.crt + // _-_-.key + // Certs/keys not existing in that directory are created with a duration of 100 years. + FixtureDirectory string +} + // GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. // Host may be an IP or a DNS name // You may also specify additional subject alt names (either ip or dns names) for the certificate. func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) { - return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "") + return GenerateSelfSignedCertKeyWithOptions(SelfSignedCertKeyOptions{ + Host: host, + AlternateIPs: alternateIPs, + AlternateDNS: alternateDNS, + }) } // GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host. @@ -106,8 +132,26 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS // _-_-.key // Certs/keys not existing in that directory are created. func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) { + return GenerateSelfSignedCertKeyWithOptions(SelfSignedCertKeyOptions{ + Host: host, + AlternateIPs: alternateIPs, + AlternateDNS: alternateDNS, + FixtureDirectory: fixtureDirectory, + }) +} + +// GenerateSelfSignedCertKeyWithOptions generates a self-signed certificate and key based on the provided options. 
+func GenerateSelfSignedCertKeyWithOptions(opts SelfSignedCertKeyOptions) ([]byte, []byte, error) { + host := opts.Host + alternateIPs := opts.AlternateIPs + alternateDNS := opts.AlternateDNS + fixtureDirectory := opts.FixtureDirectory + maxAge := opts.MaxAge + if maxAge == 0 { + maxAge = 365 * 24 * time.Hour + } + validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew - maxAge := time.Hour * 24 * 365 // one year self-signed certs baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-")) certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt") diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 899b8e34..7cb46717 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -22,7 +22,6 @@ import ( "time" "k8s.io/utils/clock" - testingclock "k8s.io/utils/clock/testing" ) type backoffEntry struct { @@ -49,7 +48,7 @@ type Backoff struct { maxJitterFactor float64 } -func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff { +func NewFakeBackOff(initial, max time.Duration, tc clock.Clock) *Backoff { return newBackoff(tc, initial, max, 0.0) } @@ -57,7 +56,7 @@ func NewBackOff(initial, max time.Duration) *Backoff { return NewBackOffWithJitter(initial, max, 0.0) } -func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff { +func NewFakeBackOffWithJitter(initial, max time.Duration, tc clock.Clock, maxJitterFactor float64) *Backoff { return newBackoff(tc, initial, max, maxJitterFactor) } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index e33a6c69..da444f4f 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ 
b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -22,6 +22,7 @@ import ( "time" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog/v2" "k8s.io/utils/clock" ) @@ -46,6 +47,10 @@ type DelayingQueueConfig = TypedDelayingQueueConfig[any] // TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. type TypedDelayingQueueConfig[T comparable] struct { + // An optional logger. The name of the queue does *not* get added to it, this should + // be done by the caller if desired. + Logger *klog.Logger + // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -94,6 +99,10 @@ func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { // NewTypedDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { + logger := klog.Background() + if config.Logger != nil { + logger = *config.Logger + } if config.Clock == nil { config.Clock = clock.RealClock{} } @@ -106,7 +115,7 @@ func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConf }) } - return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) + return newDelayingQueue(logger, config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to @@ -135,7 +144,7 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi }) } -func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { +func newDelayingQueue[T comparable](logger klog.Logger, clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { ret := &delayingType[T]{ TypedInterface: q, clock: clock, @@ -145,7 +154,7 @@ func newDelayingQueue[T 
comparable](clock clock.WithTicker, q TypedInterface[T], metrics: newRetryMetrics(name, provider), } - go ret.waitingLoop() + go ret.waitingLoop(logger) return ret } @@ -264,8 +273,8 @@ func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. -func (q *delayingType[T]) waitingLoop() { - defer utilruntime.HandleCrash() +func (q *delayingType[T]) waitingLoop(logger klog.Logger) { + defer utilruntime.HandleCrashWithLogger(logger) // Make a placeholder channel to use when there are no items in our list never := make(<-chan time.Time) diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go index 8555aa95..a76d830e 100644 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -23,4 +23,4 @@ limitations under the License. // - Multiple consumers and producers. In particular, it is allowed for an // item to be reenqueued while it is being processed. // - Shutdown notifications. 
-package workqueue // import "k8s.io/client-go/util/workqueue" +package workqueue diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go index 366bf20a..9f986a25 100644 --- a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -74,7 +74,7 @@ func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWo wg.Add(workers) for i := 0; i < workers; i++ { go func() { - defer utilruntime.HandleCrash() + defer utilruntime.HandleCrashWithContext(ctx) defer wg.Done() for chunk := range toProcess { start := chunk * chunkSize diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go deleted file mode 100644 index 462c40c2..00000000 --- a/vendor/k8s.io/utils/clock/testing/fake_clock.go +++ /dev/null @@ -1,362 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testing - -import ( - "sync" - "time" - - "k8s.io/utils/clock" -) - -var ( - _ = clock.PassiveClock(&FakePassiveClock{}) - _ = clock.WithTicker(&FakeClock{}) - _ = clock.Clock(&IntervalClock{}) -) - -// FakePassiveClock implements PassiveClock, but returns an arbitrary time. -type FakePassiveClock struct { - lock sync.RWMutex - time time.Time -} - -// FakeClock implements clock.Clock, but returns an arbitrary time. 
-type FakeClock struct { - FakePassiveClock - - // waiters are waiting for the fake time to pass their specified time - waiters []*fakeClockWaiter -} - -type fakeClockWaiter struct { - targetTime time.Time - stepInterval time.Duration - skipIfBlocked bool - destChan chan time.Time - afterFunc func() -} - -// NewFakePassiveClock returns a new FakePassiveClock. -func NewFakePassiveClock(t time.Time) *FakePassiveClock { - return &FakePassiveClock{ - time: t, - } -} - -// NewFakeClock constructs a fake clock set to the provided time. -func NewFakeClock(t time.Time) *FakeClock { - return &FakeClock{ - FakePassiveClock: *NewFakePassiveClock(t), - } -} - -// Now returns f's time. -func (f *FakePassiveClock) Now() time.Time { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time -} - -// Since returns time since the time in f. -func (f *FakePassiveClock) Since(ts time.Time) time.Duration { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time.Sub(ts) -} - -// SetTime sets the time on the FakePassiveClock. -func (f *FakePassiveClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.time = t -} - -// After is the fake version of time.After(d). -func (f *FakeClock) After(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - f.waiters = append(f.waiters, &fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }) - return ch -} - -// NewTimer constructs a fake timer, akin to time.NewTimer(d). -func (f *FakeClock) NewTimer(d time.Duration) clock.Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }, - } - f.waiters = append(f.waiters, &timer.waiter) - return timer -} - -// AfterFunc is the Fake version of time.AfterFunc(d, cb). 
-func (f *FakeClock) AfterFunc(d time.Duration, cb func()) clock.Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - afterFunc: cb, - }, - } - f.waiters = append(f.waiters, &timer.waiter) - return timer -} - -// Tick constructs a fake ticker, akin to time.Tick -func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { - if d <= 0 { - return nil - } - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, &fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return ch -} - -// NewTicker returns a new Ticker. -func (f *FakeClock) NewTicker(d time.Duration) clock.Ticker { - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, &fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return &fakeTicker{ - c: ch, - } -} - -// Step moves the clock by Duration and notifies anyone that's called After, -// Tick, or NewTimer. -func (f *FakeClock) Step(d time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(f.time.Add(d)) -} - -// SetTime sets the time. -func (f *FakeClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(t) -} - -// Actually changes the time and checks any waiters. f must be write-locked. 
-func (f *FakeClock) setTimeLocked(t time.Time) { - f.time = t - newWaiters := make([]*fakeClockWaiter, 0, len(f.waiters)) - for i := range f.waiters { - w := f.waiters[i] - if !w.targetTime.After(t) { - if w.skipIfBlocked { - select { - case w.destChan <- t: - default: - } - } else { - w.destChan <- t - } - - if w.afterFunc != nil { - w.afterFunc() - } - - if w.stepInterval > 0 { - for !w.targetTime.After(t) { - w.targetTime = w.targetTime.Add(w.stepInterval) - } - newWaiters = append(newWaiters, w) - } - - } else { - newWaiters = append(newWaiters, f.waiters[i]) - } - } - f.waiters = newWaiters -} - -// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied (so you can -// write race-free tests). -func (f *FakeClock) HasWaiters() bool { - f.lock.RLock() - defer f.lock.RUnlock() - return len(f.waiters) > 0 -} - -// Sleep is akin to time.Sleep -func (f *FakeClock) Sleep(d time.Duration) { - f.Step(d) -} - -// IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration. -// IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic. -// -// Deprecated: See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. -type IntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. -func (i *IntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *IntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} - -// After is unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) After(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement After") -} - -// NewTimer is unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. 
-func (*IntervalClock) NewTimer(d time.Duration) clock.Timer { - panic("IntervalClock doesn't implement NewTimer") -} - -// AfterFunc is unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) AfterFunc(d time.Duration, f func()) clock.Timer { - panic("IntervalClock doesn't implement AfterFunc") -} - -// Tick is unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement Tick") -} - -// NewTicker has no implementation yet and is omitted. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { - panic("IntervalClock doesn't implement NewTicker") -} - -// Sleep is unimplemented, will panic. -func (*IntervalClock) Sleep(d time.Duration) { - panic("IntervalClock doesn't implement Sleep") -} - -var _ = clock.Timer(&fakeTimer{}) - -// fakeTimer implements clock.Timer based on a FakeClock. -type fakeTimer struct { - fakeClock *FakeClock - waiter fakeClockWaiter -} - -// C returns the channel that notifies when this timer has fired. -func (f *fakeTimer) C() <-chan time.Time { - return f.waiter.destChan -} - -// Stop prevents the Timer from firing. It returns true if the call stops the -// timer, false if the timer has already expired or been stopped. -func (f *fakeTimer) Stop() bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - - active := false - newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters)) - for i := range f.fakeClock.waiters { - w := f.fakeClock.waiters[i] - if w != &f.waiter { - newWaiters = append(newWaiters, w) - continue - } - // If timer is found, it has not been fired yet. - active = true - } - - f.fakeClock.waiters = newWaiters - - return active -} - -// Reset changes the timer to expire after duration d. 
It returns true if the -// timer had been active, false if the timer had expired or been stopped. -func (f *fakeTimer) Reset(d time.Duration) bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - - active := false - - f.waiter.targetTime = f.fakeClock.time.Add(d) - - for i := range f.fakeClock.waiters { - w := f.fakeClock.waiters[i] - if w == &f.waiter { - // If timer is found, it has not been fired yet. - active = true - break - } - } - if !active { - f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter) - } - - return active -} - -type fakeTicker struct { - c <-chan time.Time -} - -func (t *fakeTicker) C() <-chan time.Time { - return t.c -} - -func (t *fakeTicker) Stop() { -} diff --git a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go deleted file mode 100644 index 951ca4d1..00000000 --- a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testing - -import ( - "time" - - "k8s.io/utils/clock" -) - -var ( - _ = clock.PassiveClock(&SimpleIntervalClock{}) -) - -// SimpleIntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration -type SimpleIntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. 
-func (i *SimpleIntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *SimpleIntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index cc6fb9c5..c17d33cc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -6,7 +6,7 @@ code.cloudfoundry.org/bbs/events code.cloudfoundry.org/bbs/format code.cloudfoundry.org/bbs/models code.cloudfoundry.org/bbs/trace -# code.cloudfoundry.org/bytefmt v0.34.0 +# code.cloudfoundry.org/bytefmt v0.39.0 ## explicit; go 1.23.0 code.cloudfoundry.org/bytefmt # code.cloudfoundry.org/cfhttp/v2 v2.44.0 @@ -19,7 +19,7 @@ code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/cfnetv1 code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/cfnetv1/internal code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/networkerror code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking/wrapper -# code.cloudfoundry.org/cli v0.0.0-20250311194037-c0bc8b6fa9c7 +# code.cloudfoundry.org/cli v0.0.0-20250410033454-7ef8a48b9bb3 ## explicit; go 1.23.6 code.cloudfoundry.org/cli/actor/actionerror code.cloudfoundry.org/cli/actor/sharedaction @@ -66,10 +66,11 @@ code.cloudfoundry.org/cli/util/lookuptable code.cloudfoundry.org/cli/util/manifest code.cloudfoundry.org/cli/util/railway code.cloudfoundry.org/cli/util/sorting +code.cloudfoundry.org/cli/util/trace code.cloudfoundry.org/cli/util/ui code.cloudfoundry.org/cli/util/unique code.cloudfoundry.org/cli/version -# code.cloudfoundry.org/clock v1.32.0 +# code.cloudfoundry.org/clock v1.37.0 ## explicit; go 1.23.0 code.cloudfoundry.org/clock # code.cloudfoundry.org/go-log-cache/v2 v2.0.7 @@ -86,13 +87,13 @@ code.cloudfoundry.org/jsonry code.cloudfoundry.org/jsonry/internal/errorcontext code.cloudfoundry.org/jsonry/internal/path code.cloudfoundry.org/jsonry/internal/tree -# code.cloudfoundry.org/lager/v3 v3.27.0 -## explicit; go 1.22.0 +# 
code.cloudfoundry.org/lager/v3 v3.32.0 +## explicit; go 1.23.0 code.cloudfoundry.org/lager/v3 code.cloudfoundry.org/lager/v3/internal/truncate # code.cloudfoundry.org/locket v0.0.0-20250423181647-b2b48694f201 ## explicit -# code.cloudfoundry.org/tlsconfig v0.22.0 +# code.cloudfoundry.org/tlsconfig v0.26.0 ## explicit; go 1.23.0 code.cloudfoundry.org/tlsconfig # code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d @@ -132,7 +133,7 @@ github.com/cespare/xxhash/v2 # github.com/charlievieth/fs v0.0.3 ## explicit; go 1.18 github.com/charlievieth/fs -# github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.8.0 +# github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.9.0 ## explicit; go 1.24.1 github.com/cloudfoundry-community/go-cf-clients-helper/v2 # github.com/cloudfoundry/bosh-cli v6.4.1+incompatible @@ -181,13 +182,12 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.2.0 -## explicit; go 1.12 -github.com/google/gofuzz -github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 ## explicit; go 1.23.0 github.com/google/pprof/profile +# github.com/google/uuid v1.6.0 +## explicit +github.com/google/uuid # github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule @@ -414,16 +414,16 @@ golang.org/x/time/rate golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector golang.org/x/tools/internal/astutil/edge -# google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 +# google.golang.org/genproto/googleapis/api v0.0.0-20250428153025-10db94c68c34 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250324211829-b45e905df463 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.71.0 -## explicit; go 1.22.0 +# google.golang.org/grpc v1.72.0 +## explicit; go 1.23 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -540,10 +540,11 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/apimachinery v0.32.3 -## explicit; go 1.23.0 +# k8s.io/apimachinery v0.33.0 +## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/api/operation k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured @@ -580,8 +581,8 @@ k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.32.3 -## explicit; go 1.23.0 +# k8s.io/client-go v0.33.0 +## explicit; go 1.24.0 k8s.io/client-go/features k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install @@ -616,10 +617,9 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e +# k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 ## explicit; go 1.18 k8s.io/utils/clock -k8s.io/utils/clock/testing k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/ptr @@ -627,7 +627,11 @@ k8s.io/utils/ptr ## explicit; go 1.23 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.6.0 +# sigs.k8s.io/randfill v1.0.0 +## explicit; go 1.18 +sigs.k8s.io/randfill +sigs.k8s.io/randfill/bytesource +# sigs.k8s.io/structured-merge-diff/v4 v4.7.0 ## explicit; go 1.13 
sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.4.0 diff --git a/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md b/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md new file mode 100644 index 00000000..7566c879 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md @@ -0,0 +1,43 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
+ + + +## Project Management + +The [maintainers](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES#L12) of this project (and often others who have official positions on the [contributor ladder](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES)) are responsible for performing project management which oversees development and maintenance of the API, tests, tools, e.t.c. While we try to be generally flexible when it comes to the management of individual pieces (such as Issues or PRs), we have some rules and guidelines which help us plan, coordinate and reduce waste. In this section you'll find some rules/guidelines for contributors related to project management which may extend or go beyond what you would find in the standard [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide). + +### Bumping stale and closed Issues & PRs + +Maintainers are ultimately responsible for triaging new issues and PRs, accepting or declining them, deciding priority and fitting them into milestones intended for future releases. Bots are responsible for marking issues and PRs which stagnate as stale, or closing them if progress does not continue for a long period of time. Due to the nature of this community-driven development effort (we do not have dedicated engineering resources, we rely on the community which is effectively "volunteer time") **not all issues can be accepted, prioritized or completed**. + +You may find times when an issue you're subscribed to and interested in seems to stagnate, or perhaps gets auto-closed. Prior to bumping or directly re-opening issues yourself, we generally ask that you bring these up for discussion on the agenda for one of our community syncs if possible, or bring them up for discussion in Slack or the mailing list as this gives us a better opportunity to discuss the issue and determine viability and logistics. 
If feasible we **highly recommend being ready to contribute directly** to any stale or unprioritized effort that you want to see move forward, as **the best way to ensure progress is to engage with the community and personally invest time**. + +We (the community) aren't opposed to making exceptions in some cases, but when in doubt please follow the above guidelines before bumping closed or stale issues if you're not ready to personally invest time in them. We are responsible for managing these and without further context or engagement we may set these back to how they were previously organized. diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/sigs.k8s.io/randfill/LICENSE similarity index 99% rename from vendor/github.com/google/gofuzz/LICENSE rename to vendor/sigs.k8s.io/randfill/LICENSE index d6456956..9dd29274 100644 --- a/vendor/github.com/google/gofuzz/LICENSE +++ b/vendor/sigs.k8s.io/randfill/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,8 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 The gofuzz Authors + Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/sigs.k8s.io/randfill/NOTICE b/vendor/sigs.k8s.io/randfill/NOTICE new file mode 100644 index 00000000..6984e71f --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/NOTICE @@ -0,0 +1,24 @@ +When donating the randfill project to the CNCF, we could not reach all the +gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules +to donate a repository, we must add a NOTICE referencing section 7 of the CLA +with a list of developers who could not be reached. + +`7. Should You wish to submit work that is not Your original creation, You may +submit it to the Foundation separately from any Contribution, identifying the +complete details of its source and of any license or other restriction +(including, but not limited to, related patents, trademarks, and license +agreements) of which you are personally aware, and conspicuously marking the +work as "Submitted on behalf of a third-party: [named here]".` + +Submitted on behalf of a third-party: @dnephin (Daniel Nephin) +Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko) +Submitted on behalf of a third-party: @bbigras (Bruno Bigras) +Submitted on behalf of a third-party: @samirkut (Samir) +Submitted on behalf of a third-party: @posener (Eyal Posener) +Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul) +Submitted on behalf of a third-party: @kwongtailau (Kwongtai) +Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen) +Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin) +Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan) +Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU) +Submitted on behalf of a third-party: @disconnect3d (Disconnect3d) diff --git a/vendor/sigs.k8s.io/randfill/OWNERS b/vendor/sigs.k8s.io/randfill/OWNERS new file mode 100644 index 00000000..59f6a50f --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners +# See the 
OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias. + +approvers: + - sig-testing-leads + - thockin + +reviewers: [] diff --git a/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES new file mode 100644 index 00000000..927f1209 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES @@ -0,0 +1,14 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file should be kept in sync with k/org. + +aliases: + # Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES + sig-testing-leads: + - BenTheElder + - alvaroaleman + - aojea + - cjwagner + - jbpratt + - michelle192837 + - pohly + - xmcqueen diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/sigs.k8s.io/randfill/README.md similarity index 53% rename from vendor/github.com/google/gofuzz/README.md rename to vendor/sigs.k8s.io/randfill/README.md index b503aae7..d892fc9f 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/sigs.k8s.io/randfill/README.md @@ -1,39 +1,46 @@ -gofuzz +randfill ====== -gofuzz is a library for populating go objects with random values. +randfill is a library for populating go objects with random values. -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) +This is a fork of github.com/google/gofuzz, which was archived. + +NOTE: This repo is supported only for use within Kubernetes. It is not our +intention to support general use. That said, if it works for you, that's +great! If you have a problem, please feel free to file an issue, but be aware +that it may not be a priority for us to fix it unless it is affecting +Kubernetes. PRs are welcome, within reason. 
+ +[![GoDoc](https://godoc.org/sigs.k8s.io/randfill?status.svg)](https://godoc.org/sigs.k8s.io/randfill) This is useful for testing: * Do your project's objects really serialize/unserialize correctly in all cases? * Is there an incorrectly formatted object that will cause your project to panic? -Import with ```import "github.com/google/gofuzz"``` +Import with ```import "sigs.k8s.io/randfill"``` You can use it on single variables: ```go -f := fuzz.New() +f := randfill.New() var myInt int -f.Fuzz(&myInt) // myInt gets a random value. +f.Fill(&myInt) // myInt gets a random value. ``` You can use it on maps: ```go -f := fuzz.New().NilChance(0).NumElements(1, 1) +f := randfill.New().NilChance(0).NumElements(1, 1) var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. +f.Fill(&myMap) // myMap will have exactly one element. ``` Customize the chance of getting a nil pointer: ```go -f := fuzz.New().NilChance(.5) +f := randfill.New().NilChance(.5) var fancyStruct struct { A, B, C, D *string } -f.Fuzz(&fancyStruct) // About half the pointers should be set. +f.Fill(&fancyStruct) // About half the pointers should be set. ``` You can even customize the randomization completely if needed: @@ -49,25 +56,27 @@ type MyInfo struct { BInfo *string } -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { +f := randfill.New().NilChance(0).Funcs( + func(e *MyInfo, c randfill.Continue) { switch c.Intn(2) { case 0: e.Type = A - c.Fuzz(&e.AInfo) + c.Fill(&e.AInfo) case 1: e.Type = B - c.Fuzz(&e.BInfo) + c.Fill(&e.BInfo) } }, ) var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +f.Fill(&myObject) // Type will correspond to whether A or B info is set. ``` See more examples in ```example_test.go```. +## dvyukov/go-fuzz integration + You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. 
go-fuzz provides the user a byte-slice, which should be converted to different inputs for the tested function. This library can help convert the byte slice. Consider for @@ -76,11 +85,11 @@ example a fuzz test for a the function `mypackage.MyFunc` that takes an int argu // +build gofuzz package mypackage -import fuzz "github.com/google/gofuzz" +import "sigs.k8s.io/randfill" func Fuzz(data []byte) int { var i int - fuzz.NewFromGoFuzz(data).Fuzz(&i) + randfill.NewFromGoFuzz(data).Fill(&i) MyFunc(i) return 0 } diff --git a/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS b/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS new file mode 100644 index 00000000..91d78533 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS @@ -0,0 +1,16 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +thockin +BenTheElder +aojea +pohly diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/sigs.k8s.io/randfill/bytesource/bytesource.go similarity index 100% rename from vendor/github.com/google/gofuzz/bytesource/bytesource.go rename to vendor/sigs.k8s.io/randfill/bytesource/bytesource.go diff --git a/vendor/sigs.k8s.io/randfill/code-of-conduct.md b/vendor/sigs.k8s.io/randfill/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/randfill/randfill.go b/vendor/sigs.k8s.io/randfill/randfill.go new file mode 100644 index 00000000..b7348248 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/randfill.go @@ -0,0 +1,682 @@ +/* +Copyright 2014 Google Inc. All rights reserved. +Copyright 2014 The gofuzz Authors. +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package randfill is a library for populating go objects with random values. 
+package randfill + +import ( + "fmt" + "math/rand" + "reflect" + "regexp" + "sync" + "time" + "unsafe" + + "strings" + + "sigs.k8s.io/randfill/bytesource" +) + +// funcMap is a map from a type to a function that randfills that type. The +// function is a reflect.Value because the type being filled is different for +// each func. +type funcMap map[reflect.Type]reflect.Value + +// Filler knows how to fill any object with random fields. +type Filler struct { + customFuncs funcMap + defaultFuncs funcMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + allowUnexportedFields bool + skipFieldPatterns []*regexp.Regexp + + lock sync.Mutex +} + +// New returns a new Filler. Customize your Filler further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. +func New() *Filler { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Filler { + f := &Filler{ + defaultFuncs: funcMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(randfillTime), + }, + + customFuncs: funcMap{}, + r: rand.New(rand.NewSource(seed)), + nilChance: .2, + minElements: 1, + maxElements: 10, + maxDepth: 100, + allowUnexportedFields: false, + } + return f +} + +// NewFromGoFuzz is a helper function that enables using randfill (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Filler should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. 
Add the file: "mypackage_fuzz.go" with the content: +// +// // +build gofuzz +// package mypackage +// import "sigs.k8s.io/randfill" +// +// func Fuzz(data []byte) int { +// var i int +// randfill.NewFromGoFuzz(data).Fill(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Filler { + return New().RandSource(bytesource.New(data)) +} + +// Funcs registers custom fill functions for this Filler. +// +// Each entry in customFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// function will fill with random data. The second parameter must be a +// randfill.Continue, which will provide a source of randomness and a way +// to automatically continue filling smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// filling, the function `func(s *string, c randfill.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChance option. For slices, it +// doesn't make much sense to pre-create them--Filler doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions. +// +// If a function is already registered for a type, and a new function is +// provided, the previous function will be replaced with the new one. 
+func (f *Filler) Funcs(customFuncs ...interface{}) *Filler { + for i := range customFuncs { + v := reflect.ValueOf(customFuncs[i]) + if v.Kind() != reflect.Func { + panic("Filler.Funcs: all arguments must be functions") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Filler.Funcs: all customFuncs must have 2 arguments and 0 returns") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("Filler.Funcs: customFuncs' first argument must be a pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("Filler.Funcs: customFuncs' second argument must be a randfill.Continue") + } + f.customFuncs[argT] = v + } + return f +} + +// RandSource causes this Filler to get values from the given source of +// randomness. Use this if you want deterministic filling. +func (f *Filler) RandSource(s rand.Source) *Filler { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Filler) NilChance(p float64) *Filler { + if p < 0 || p > 1 { + panic("Filler.NilChance: p must be between 0 and 1, inclusive") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Filler) NumElements(min, max int) *Filler { + if min < 0 { + panic("Filler.NumElements: min must be >= 0") + } + if min > max { + panic("Filler.NumElements: min must be <= max") + } + f.minElements = min + f.maxElements = max + return f +} + +func (f *Filler) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Filler) genShouldFill() bool { + return f.r.Float64() >= f.nilChance +} + +// MaxDepth sets the maximum number of recursive fill calls that will be made +// before stopping. 
This includes struct members, pointers, and map and slice +// elements. +func (f *Filler) MaxDepth(d int) *Filler { + f.maxDepth = d + return f +} + +// AllowUnexportedFields defines whether to fill unexported fields. +func (f *Filler) AllowUnexportedFields(flag bool) *Filler { + f.allowUnexportedFields = flag + return f +} + +// SkipFieldsWithPattern tells this Filler to skip any field whose name matches +// the supplied pattern. Call this multiple times if needed. This is useful to +// skip XXX_ fields generated by protobuf. +func (f *Filler) SkipFieldsWithPattern(pattern *regexp.Regexp) *Filler { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + +// SimpleSelfFiller represents an object that knows how to randfill itself. +// +// Unlike NativeSelfFiller, this interface does not cause the type in question +// to depend on the randfill package. This is most useful for simple types. For +// more complex types, consider using NativeSelfFiller. +type SimpleSelfFiller interface { + // RandFill fills the current object with random data. + RandFill(r *rand.Rand) +} + +// NativeSelfFiller represents an object that knows how to randfill itself. +// +// Unlike SimpleSelfFiller, this interface allows for recursive filling of +// child objects with the same rules as the parent Filler. +type NativeSelfFiller interface { + // RandFill fills the current object with random data. + RandFill(c Continue) +} + +// Fill recursively fills all of obj's fields with something random. First +// this tries to find a custom fill function (see Funcs). If there is no +// custom function, this tests whether the object implements SimpleSelfFiller +// or NativeSelfFiller and if so, calls RandFill on it to fill itself. If that +// fails, this will see if there is a default fill function provided by this +// package. If all of that fails, this will generate random values for all +// primitive fields and then recurse for all non-primitives. 
+// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Exported (public) fields can always be set, and if +// the AllowUnexportedFields() modifier was called it can try to set unexported +// (private) fields, too. +// +// This is intended for tests, so will panic on bad input or unimplemented +// types. This method takes a lock for the whole Filler, so it is not +// reentrant. See Continue. +func (f *Filler) Fill(obj interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("Filler.Fill: obj must be a pointer") + } + v = v.Elem() + f.fillWithContext(v, 0) +} + +// FillNoCustom is just like Fill, except that any custom fill function for +// obj's type will not be called and obj will not be tested for +// SimpleSelfFiller or NativeSelfFiller. This applies only to obj and not other +// instances of obj's type or to obj's child fields. +// +// obj must be a pointer. Exported (public) fields can always be set, and if +// the AllowUnexportedFields() modifier was called it can try to set unexported +// (private) fields, too. +// +// This is intended for tests, so will panic on bad input or unimplemented +// types. This method takes a lock for the whole Filler, so it is not +// reentrant. See Continue. +func (f *Filler) FillNoCustom(obj interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("Filler.FillNoCustom: obj must be a pointer") + } + v = v.Elem() + f.fillWithContext(v, flagNoCustomFill) +} + +const ( + // Do not try to find a custom fill function. Does not apply recursively. 
+ flagNoCustomFill uint64 = 1 << iota +) + +func (f *Filler) fillWithContext(v reflect.Value, flags uint64) { + fc := &fillerContext{filler: f} + fc.doFill(v, flags) +} + +// fillerContext carries context about a single filling run, which lets Filler +// be thread-safe. +type fillerContext struct { + filler *Filler + curDepth int +} + +func (fc *fillerContext) doFill(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.filler.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + if !fc.filler.allowUnexportedFields || !v.CanAddr() { + return + } + v = reflect.NewAt(v.Type(), unsafe.Pointer(v.UnsafeAddr())).Elem() + } + + if flags&flagNoCustomFill == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.filler.r) + return + } + + switch v.Kind() { + case reflect.Map: + if fc.filler.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.filler.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFill(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFill(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.filler.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFill(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.filler.genShouldFill() { + n := fc.filler.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFill(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.filler.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFill(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + skipField := false + fieldName := 
v.Type().Field(i).Name + for _, pattern := range fc.filler.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFill(v.Field(i), 0) + } + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("can't fill type %v, kind %v", v.Type(), v.Kind())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fillerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fill function for it. + doCustom, ok := fc.filler.customFuncs[v.Type()] + if !ok { + // Second: see if it can fill itself. + if v.CanInterface() { + intf := v.Interface() + if fillable, ok := intf.(SimpleSelfFiller); ok { + fillable.RandFill(fc.filler.r) + return true + } + if fillable, ok := intf.(NativeSelfFiller); ok { + fillable.RandFill(Continue{fc: fc, Rand: fc.filler.r}) + return true + } + } + // Finally: see if there is a default fill function. + doCustom, ok = fc.filler.defaultFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{ + v, + reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.filler.r, + }), + }) + return true +} + +// Continue can be passed to custom fill functions to allow them to use +// the correct source of randomness and to continue filling their members. +type Continue struct { + fc *fillerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your filling + // to be repeatable for a given seed. + *rand.Rand +} + +// Fill continues filling obj. 
obj must be a pointer or a reflect.Value of a +// pointer. See Filler.Fill. +func (c Continue) Fill(obj interface{}) { + v, ok := obj.(reflect.Value) + if !ok { + v = reflect.ValueOf(obj) + } + if v.Kind() != reflect.Ptr { + panic("Continue.Fill: obj must be a pointer") + } + v = v.Elem() + c.fc.doFill(v, 0) +} + +// FillNoCustom continues filling obj, except that any custom fill function for +// obj's type will not be called and obj will not be tested for +// SimpleSelfFiller or NativeSelfFiller. See Filler.FillNoCustom. +func (c Continue) FillNoCustom(obj interface{}) { + v, ok := obj.(reflect.Value) + if !ok { + v = reflect.ValueOf(obj) + } + if v.Kind() != reflect.Ptr { + panic("Continue.FillNoCustom: obj must be a pointer") + } + v = v.Elem() + c.fc.doFill(v, flagNoCustomFill) +} + +const defaultStringMaxLen = 20 + +// String makes a random string up to n characters long. If n is 0, the default +// size range is [0-20). The returned string may include a variety of (valid) +// UTF-8 encodings. +func (c Continue) String(n int) string { + return randString(c.Rand, n) +} + +// Uint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) Uint64() uint64 { + return randUint64(c.Rand) +} + +// Bool returns true or false randomly. +func (c Continue) Bool() bool { + return randBool(c.Rand) +} + +func fillInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fillUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func randfillTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. + sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + // Nanosecond values greater than 1Bn are technically allowed but result in + // time.Time values with invalid timezone offsets. 
+ nsec = c.Rand.Int63n(999999999) + *t = time.Unix(sec, nsec) +} + +var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ + reflect.Bool: func(v reflect.Value, r *rand.Rand) { + v.SetBool(randBool(r)) + }, + reflect.Int: fillInt, + reflect.Int8: fillInt, + reflect.Int16: fillInt, + reflect.Int32: fillInt, + reflect.Int64: fillInt, + reflect.Uint: fillUint, + reflect.Uint8: fillUint, + reflect.Uint16: fillUint, + reflect.Uint32: fillUint, + reflect.Uint64: fillUint, + reflect.Uintptr: fillUint, + reflect.Float32: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(float64(r.Float32())) + }, + reflect.Float64: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(r.Float64()) + }, + reflect.Complex64: func(v reflect.Value, r *rand.Rand) { + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) + }, + reflect.Complex128: func(v reflect.Value, r *rand.Rand) { + v.SetComplex(complex(r.Float64(), r.Float64())) + }, + reflect.String: func(v reflect.Value, r *rand.Rand) { + v.SetString(randString(r, 0)) + }, + reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { + panic("filling of UnsafePointers is not implemented") + }, +} + +// randBool returns true or false randomly. +func randBool(r *rand.Rand) bool { + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 +} + +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune +} + +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + +// choose returns a random unicode character from the given range, using the +// given randomness source. 
+func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFillFunc constructs a FillFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. +func (ur UnicodeRange) CustomStringFillFunc(n int) func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand, n) + } +} + +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("UnicodeRange.check: the last encoding must be greater than the first") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand, max int) string { + if max == 0 { + max = defaultStringMaxLen + } + n := r.Intn(max) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when users do not set +// CustomStringFillFunc() but want to fill strings. +var defaultUnicodeRanges = UnicodeRanges{ + {' ', '~'}, // ASCII characters + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +// CustomStringFillFunc constructs a FillFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges). +// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. 
+func (ur UnicodeRanges) CustomStringFillFunc(n int) func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand, n) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand, max int) string { + if max == 0 { + max = defaultStringMaxLen + } + n := r.Intn(max) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + +// randString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func randString(r *rand.Rand, max int) string { + return defaultUnicodeRanges.randString(r, max) +} + +// randUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. 
+func randUint64(r *rand.Rand) uint64 { + return uint64(r.Uint32())<<32 | uint64(r.Uint32()) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go index d4adb8fc..3aadceb2 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go @@ -22,22 +22,77 @@ import ( "strings" ) +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() + +func reflectIsZero(dv reflect.Value) bool { + return dv.IsZero() +} + +// OmitZeroFunc returns a function for a type for a given struct field +// which determines if the value for that field is a zero value, matching +// how the stdlib JSON implementation. +func OmitZeroFunc(t reflect.Type) func(reflect.Value) bool { + // Provide a function that uses a type's IsZero method. + // This matches the go 1.24 custom IsZero() implementation matching + switch { + case t.Kind() == reflect.Interface && t.Implements(isZeroerType): + return func(v reflect.Value) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return safeIsNil(v) || + (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) || + v.Interface().(isZeroer).IsZero() + } + case t.Kind() == reflect.Pointer && t.Implements(isZeroerType): + return func(v reflect.Value) bool { + // Avoid panics calling IsZero on nil pointer. + return safeIsNil(v) || v.Interface().(isZeroer).IsZero() + } + case t.Implements(isZeroerType): + return func(v reflect.Value) bool { + return v.Interface().(isZeroer).IsZero() + } + case reflect.PointerTo(t).Implements(isZeroerType): + return func(v reflect.Value) bool { + if !v.CanAddr() { + // Temporarily box v so we can take the address. 
+ v2 := reflect.New(v.Type()).Elem() + v2.Set(v) + v = v2 + } + return v.Addr().Interface().(isZeroer).IsZero() + } + default: + // default to the reflect.IsZero implementation + return reflectIsZero + } +} + // TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 // but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go -func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) { +func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool, omitzero func(reflect.Value) bool) { tag := f.Tag.Get("json") if tag == "-" { - return "", true, false, false + return "", true, false, false, nil } name, opts := parseTag(tag) if name == "" { name = f.Name } - return name, false, opts.Contains("inline"), opts.Contains("omitempty") + + if opts.Contains("omitzero") { + omitzero = OmitZeroFunc(f.Type) + } + + return name, false, opts.Contains("inline"), opts.Contains("omitempty"), omitzero } -func isZero(v reflect.Value) bool { +func isEmpty(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index 88693b87..3b4a402e 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -59,6 +59,8 @@ type FieldCacheEntry struct { JsonName string // isOmitEmpty is true if the field has the json 'omitempty' tag. isOmitEmpty bool + // omitzero is set if the field has the json 'omitzero' tag. + omitzero func(reflect.Value) bool // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of // a field in a reflect.Value struct. 
The field indices in the list form a path used // to traverse through intermediary 'inline' fields. @@ -69,7 +71,13 @@ type FieldCacheEntry struct { } func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { - return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) + if f.isOmitEmpty && (safeIsNil(fieldVal) || isEmpty(fieldVal)) { + return true + } + if f.omitzero != nil && f.omitzero(fieldVal) { + return true + } + return false } // GetFrom returns the field identified by this FieldCacheEntry from the provided struct. @@ -147,7 +155,7 @@ func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCache func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) { for i := 0; i < t.NumField(); i++ { field := t.Field(i) - jsonName, omit, isInline, isOmitempty := lookupJsonTags(field) + jsonName, omit, isInline, isOmitempty, omitzero := lookupJsonTags(field) if omit { continue } @@ -161,7 +169,7 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi } continue } - info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} + info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, omitzero: omitzero, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} infos[jsonName] = info } } From 466623f029500c3069beb1798ac270e1db6fbaec Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Sat, 3 May 2025 11:08:00 +0200 Subject: [PATCH 10/13] use ActualLRPs request to test connectivity to bbs to retrieve better error info if needed --- fetcher/bbs_client.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fetcher/bbs_client.go b/fetcher/bbs_client.go index a3b64e91..4c6b407b 100644 --- a/fetcher/bbs_client.go +++ b/fetcher/bbs_client.go @@ -70,8 +70,9 @@ func (b *BBSClient) GetActualLRPs() ([]*models.ActualLRP, error) { func (b *BBSClient) TestConnection() 
error { traceID := trace.GenerateTraceID() - if b.client.Ping(b.logger, traceID) { - return nil + _, err := b.client.ActualLRPs(b.logger, traceID, models.ActualLRPFilter{}) + if err != nil { + return fmt.Errorf("error connecting to BBS: %s", err) } - return fmt.Errorf("failed to ping BBS") + return nil } From f48ec28a4e1c5713017bf771b3aa46a0bd21e85c Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Sat, 3 May 2025 11:16:02 +0200 Subject: [PATCH 11/13] fix fetcher/sessionext_test --- fetcher/sessionext_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fetcher/sessionext_test.go b/fetcher/sessionext_test.go index b336f8e4..ac9698fd 100644 --- a/fetcher/sessionext_test.go +++ b/fetcher/sessionext_test.go @@ -54,8 +54,8 @@ var _ = ginkgo.Describe("Extensions", func() { server.AppendHandlers( ghttp.CombineHandlers( ghttp.VerifyRequest("GET", "/"), - ghttp.RespondWith(http.StatusOK, serialize(ccv3.Info{ - Links: ccv3.InfoLinks{ + ghttp.RespondWith(http.StatusOK, serialize(ccv3.Root{ + Links: ccv3.RootLinks{ Login: resources.APILink{HREF: server.URL()}, UAA: resources.APILink{HREF: server.URL()}, }, From e0c44a353e71b7c68ae08fe59dced94d8cbac25c Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Sun, 4 May 2025 20:21:19 +0200 Subject: [PATCH 12/13] starts github.com/cloudfoundry/cf_exporter/v2 as abandon CF API v2 presents a breaking change --- README.md | 2 +- collectors/applications.go | 2 +- collectors/buildpacks.go | 2 +- collectors/collectors.go | 6 +++--- collectors/domains.go | 2 +- collectors/events.go | 2 +- collectors/isolation_segments.go | 2 +- collectors/organizations.go | 2 +- collectors/route_bindings.go | 2 +- collectors/routes.go | 2 +- collectors/security_groups.go | 2 +- collectors/service_bindings.go | 2 +- collectors/service_instances.go | 2 +- collectors/service_plans.go | 2 +- collectors/services.go | 2 +- collectors/spaces.go | 2 +- collectors/stacks.go | 2 +- collectors/tasks.go | 2 +- fetcher/fetcher.go | 4 ++-- 
fetcher/fetcher_handlers.go | 2 +- fetcher/fetcher_test.go | 2 +- fetcher/sessionext.go | 2 +- fetcher/sessionext_test.go | 2 +- fetcher/worker.go | 4 ++-- filters/filters_test.go | 2 +- go.mod | 2 +- main.go | 6 +++--- packages/cf/manifest.yml | 2 +- 28 files changed, 34 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index bcd03427..387df47d 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ $ ./cf_exporter Using the standard `go install` (you must have [Go][golang] already installed in your local machine): ```bash -$ go install github.com/cloudfoundry/cf_exporter +$ go install github.com/cloudfoundry/cf_exporter/v2 $ cf_exporter ``` diff --git a/collectors/applications.go b/collectors/applications.go index dd5cf5be..92ba5c83 100644 --- a/collectors/applications.go +++ b/collectors/applications.go @@ -7,7 +7,7 @@ import ( "time" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) diff --git a/collectors/buildpacks.go b/collectors/buildpacks.go index 4158d94d..bad593a0 100644 --- a/collectors/buildpacks.go +++ b/collectors/buildpacks.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/collectors.go b/collectors/collectors.go index 6955ecf0..c546678f 100644 --- a/collectors/collectors.go +++ b/collectors/collectors.go @@ -1,9 +1,9 @@ package collectors import ( - "github.com/cloudfoundry/cf_exporter/fetcher" - "github.com/cloudfoundry/cf_exporter/filters" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/fetcher" + "github.com/cloudfoundry/cf_exporter/v2/filters" + "github.com/cloudfoundry/cf_exporter/v2/models" 
"github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/domains.go b/collectors/domains.go index 0010997e..1ca0c106 100644 --- a/collectors/domains.go +++ b/collectors/domains.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/events.go b/collectors/events.go index 9627eeb5..d1ffb39b 100644 --- a/collectors/events.go +++ b/collectors/events.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/isolation_segments.go b/collectors/isolation_segments.go index 3cab1fe7..3040adb9 100644 --- a/collectors/isolation_segments.go +++ b/collectors/isolation_segments.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/organizations.go b/collectors/organizations.go index d4dd0920..16b086cb 100644 --- a/collectors/organizations.go +++ b/collectors/organizations.go @@ -6,7 +6,7 @@ import ( "time" "code.cloudfoundry.org/cli/resources" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) diff --git a/collectors/route_bindings.go b/collectors/route_bindings.go index 67a4cbd5..9b68abc7 100644 --- a/collectors/route_bindings.go +++ b/collectors/route_bindings.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/routes.go b/collectors/routes.go index 
10b3c159..2deb5d06 100644 --- a/collectors/routes.go +++ b/collectors/routes.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/security_groups.go b/collectors/security_groups.go index 29581f82..2dd9695c 100644 --- a/collectors/security_groups.go +++ b/collectors/security_groups.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/service_bindings.go b/collectors/service_bindings.go index e38be5ca..28025e5d 100644 --- a/collectors/service_bindings.go +++ b/collectors/service_bindings.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/service_instances.go b/collectors/service_instances.go index 7c743603..ed8844c1 100644 --- a/collectors/service_instances.go +++ b/collectors/service_instances.go @@ -4,7 +4,7 @@ import ( "time" "code.cloudfoundry.org/cli/resources" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/service_plans.go b/collectors/service_plans.go index 1366901c..eda68133 100644 --- a/collectors/service_plans.go +++ b/collectors/service_plans.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/services.go b/collectors/services.go index c19ee353..2fd8fc68 100644 --- a/collectors/services.go +++ b/collectors/services.go @@ -3,7 +3,7 @@ package collectors 
import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/spaces.go b/collectors/spaces.go index a28a1229..626ba1ea 100644 --- a/collectors/spaces.go +++ b/collectors/spaces.go @@ -6,7 +6,7 @@ import ( "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" "code.cloudfoundry.org/cli/resources" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) diff --git a/collectors/stacks.go b/collectors/stacks.go index a3925e95..03106210 100644 --- a/collectors/stacks.go +++ b/collectors/stacks.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/collectors/tasks.go b/collectors/tasks.go index 6fd0b4a3..d95707cc 100644 --- a/collectors/tasks.go +++ b/collectors/tasks.go @@ -3,7 +3,7 @@ package collectors import ( "time" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" "github.com/prometheus/client_golang/prometheus" ) diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index 21be85ba..81ffe2c0 100644 --- a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -5,8 +5,8 @@ import ( "time" "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" - "github.com/cloudfoundry/cf_exporter/filters" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/filters" + "github.com/cloudfoundry/cf_exporter/v2/models" log "github.com/sirupsen/logrus" ) diff --git a/fetcher/fetcher_handlers.go b/fetcher/fetcher_handlers.go index be7a0787..3031971c 100644 --- a/fetcher/fetcher_handlers.go +++ b/fetcher/fetcher_handlers.go @@ -8,7 +8,7 @@ import ( "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" 
"code.cloudfoundry.org/cli/resources" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" log "github.com/sirupsen/logrus" ) diff --git a/fetcher/fetcher_test.go b/fetcher/fetcher_test.go index a2e54d15..bd95078d 100644 --- a/fetcher/fetcher_test.go +++ b/fetcher/fetcher_test.go @@ -4,7 +4,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" - "github.com/cloudfoundry/cf_exporter/filters" + "github.com/cloudfoundry/cf_exporter/v2/filters" ) var _ = ginkgo.Describe("Fetcher", func() { diff --git a/fetcher/sessionext.go b/fetcher/sessionext.go index c3fd51de..72202c49 100644 --- a/fetcher/sessionext.go +++ b/fetcher/sessionext.go @@ -7,7 +7,7 @@ import ( "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" clients "github.com/cloudfoundry-community/go-cf-clients-helper/v2" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" log "github.com/sirupsen/logrus" ) diff --git a/fetcher/sessionext_test.go b/fetcher/sessionext_test.go index ac9698fd..bf8921d2 100644 --- a/fetcher/sessionext_test.go +++ b/fetcher/sessionext_test.go @@ -16,7 +16,7 @@ import ( "encoding/json" "net/http" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/models" ) const ( diff --git a/fetcher/worker.go b/fetcher/worker.go index 04780bc4..5ecc0520 100644 --- a/fetcher/worker.go +++ b/fetcher/worker.go @@ -4,8 +4,8 @@ import ( "sync" "time" - "github.com/cloudfoundry/cf_exporter/filters" - "github.com/cloudfoundry/cf_exporter/models" + "github.com/cloudfoundry/cf_exporter/v2/filters" + "github.com/cloudfoundry/cf_exporter/v2/models" log "github.com/sirupsen/logrus" ) diff --git a/filters/filters_test.go b/filters/filters_test.go index c5107cc4..a285cbc4 100644 --- a/filters/filters_test.go +++ b/filters/filters_test.go @@ -4,7 +4,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" - "github.com/cloudfoundry/cf_exporter/filters" + 
"github.com/cloudfoundry/cf_exporter/v2/filters" ) var _ = ginkgo.Describe("Filters", func() { diff --git a/go.mod b/go.mod index 9447a778..36881cac 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/cloudfoundry/cf_exporter +module github.com/cloudfoundry/cf_exporter/v2 go 1.24.1 diff --git a/main.go b/main.go index 1fd69f07..943e7d3d 100644 --- a/main.go +++ b/main.go @@ -7,9 +7,9 @@ import ( "time" kingpin "github.com/alecthomas/kingpin/v2" - "github.com/cloudfoundry/cf_exporter/collectors" - "github.com/cloudfoundry/cf_exporter/fetcher" - "github.com/cloudfoundry/cf_exporter/filters" + "github.com/cloudfoundry/cf_exporter/v2/collectors" + "github.com/cloudfoundry/cf_exporter/v2/fetcher" + "github.com/cloudfoundry/cf_exporter/v2/filters" "github.com/prometheus/client_golang/prometheus" versionCollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/promhttp" diff --git a/packages/cf/manifest.yml b/packages/cf/manifest.yml index 92ac6e3c..c65c382f 100644 --- a/packages/cf/manifest.yml +++ b/packages/cf/manifest.yml @@ -3,7 +3,7 @@ applications: - name: cf-exporter buildpack: go_buildpack env: - GOPACKAGENAME: github.com/cloudfoundry/cf_exporter + GOPACKAGENAME: github.com/cloudfoundry/cf_exporter/v2 CF_EXPORTER_CF_API_URL: "Your Cloud Foundry API URL" CF_EXPORTER_CF_USERNAME: "Your Cloud Foundry Username" CF_EXPORTER_CF_PASSWORD: "Your Cloud Foundry Password" From 3afab1184cd022268f647c134fe227000a537421 Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Sun, 18 May 2025 19:15:56 +0200 Subject: [PATCH 13/13] Update README.md - Explicit the fact that configure BBS values is optional --- README.md | 79 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 41 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 387df47d..c58e2178 100644 --- a/README.md +++ b/README.md @@ -134,18 +134,22 @@ Flags: Either `--cf.username` and `--cf.password` or 
`--cf.client-id` and `--cf.client-secret` must be provided. +BBS configuration (`--bbs.api_url`, `--bbs.ca_file`, `--bbs.cert_file`, `--bbs.key_file`, `--bbs.skip_ssl_verify` and +`--bbs.timeout`) can be omitted but is required if you want metrics from the BBS API ( +`._application_instances_running`) to be included. + ### Metrics The exporter returns the following `Applications` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|---------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | *metrics.namespace*_application_info | Labeled Cloud Foundry Application information with a constant `1` value | `environment`, `deployment`, `application_id`, `application_name`, `detected_buildpack`, `buildpack`, `organization_id`, `organization_name`, `space_id`, `space_name`, `stack_id`, `state` | | *metrics.namespace*_application_instances | Number of desired Cloud Foundry Application Instances | `environment`, `deployment`, `application_id`, `application_name`, `organization_id`, `organization_name`, `space_id`, `space_name`, `state` | -| *metrics.namespace*_application_instances_running | Number of running Cloud Foundry Application Instances | `environment`, `deployment`, `application_id`, `application_name`, `organization_id`, `organization_name`, `space_id`, `space_name`, `state` | +| *metrics.namespace*_application_instances_running | Number of running Cloud Foundry Application Instances (only included if BBS configuration is given) | `environment`, `deployment`, `application_id`, `application_name`, `organization_id`, `organization_name`, `space_id`, `space_name`, `state` | | *metrics.namespace*_application_memory_mb | Cloud 
Foundry Application Memory (Mb) | `environment`, `deployment`, `application_id`, `application_name`, `organization_id`, `organization_name`, `space_id`, `space_name` | | *metrics.namespace*_application_disk_quota_mb | Cloud Foundry Application Disk Quota (Mb) | `environment`, `deployment`, `application_id`, `application_name`, `organization_id`, `organization_name`, `space_id`, `space_name` | -| *metrics.namespace*_application_buildpack | All the buildpacks used by an Application. | `environment`, `deployment`, `application_id`, `application_name`, `buildpack_name` +| *metrics.namespace*_application_buildpack | All the buildpacks used by an Application. | `environment`, `deployment`, `application_id`, `application_name`, `buildpack_name` | | *metrics.namespace*_applications_scrapes_total | Total number of scrapes for Cloud Foundry Applications | `environment`, `deployment` | | *metrics.namespace*_applications_scrape_errors_total | Total number of scrape errors of Cloud Foundry Applications | `environment`, `deployment` | | *metrics.namespace*_last_applications_scrape_error | Whether the last scrape of Applications metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | @@ -155,7 +159,7 @@ The exporter returns the following `Applications` metrics: The exporter returns the following `Buildpacks` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|-------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------| | *metrics.namespace*_buildpack_info | Labeled Cloud Foundry Buildpacks information with a constant `1` value | `environment`, `deployment`, `buildpack_id`, `buildpack_name`, `buildpack_stack`, `buildpack_filename` | | 
*metrics.namespace*_buildpacks_scrapes_total | Total number of scrapes for Cloud Foundry Buildpacks | `environment`, `deployment` | | *metrics.namespace*_buildpacks_scrape_errors_total | Total number of scrape errors of Cloud Foundry Buildpacks | `environment`, `deployment` | @@ -165,19 +169,19 @@ The exporter returns the following `Buildpacks` metrics: The exporter returns the following `Domain` metrics: -| Metric | Description | Labels | -| ------ | ----------- | ------ | -| *metrics.namespace*_domain_info | Cloud Foundry domains, labeled by domain ID, name, whether it is internal, and supported protocol. Metric value is set to 1. | `environment`, `deployment`, `domain_id`, `domain_name`, `internal`, `protocol` | -| *metrics.namespace*_domain_scrapes_total | Total number of scrapes for Cloud Foundry Domains | `environment`, `deployment` | -| *metrics.namespace*_domain_scrape_errors_total | Total number of scrape errors of Cloud Foundry Domains | `environment`, `deployment` | -| *metrics.namespace*_last_domains_scrape_error | Whether the last scrape of Domain metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | -| *metrics.namespace*_last_domains_scrape_timestamp | Number of seconds since 1970 since last scrape of Domain metrics from Cloud Foundry | `environment`, `deployment` | -| *metrics.namespace*_last_domains_scrape_duration_seconds | Duration of the last scrape of Domain metrics from Cloud Foundry | `environment`, `deployment` +| Metric | Description | Labels | +|----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------| +| *metrics.namespace*_domain_info | Cloud Foundry domains, labeled by domain ID, name, whether it is internal, and supported protocol. Metric value is set to 1. 
| `environment`, `deployment`, `domain_id`, `domain_name`, `internal`, `protocol` | +| *metrics.namespace*_domain_scrapes_total | Total number of scrapes for Cloud Foundry Domains | `environment`, `deployment` | +| *metrics.namespace*_domain_scrape_errors_total | Total number of scrape errors of Cloud Foundry Domains | `environment`, `deployment` | +| *metrics.namespace*_last_domains_scrape_error | Whether the last scrape of Domain metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | +| *metrics.namespace*_last_domains_scrape_timestamp | Number of seconds since 1970 since last scrape of Domain metrics from Cloud Foundry | `environment`, `deployment` | +| *metrics.namespace*_last_domains_scrape_duration_seconds | Duration of the last scrape of Domain metrics from Cloud Foundry | `environment`, `deployment` | The exporter returns the following `Events` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|---------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| | *metrics.namespace*_events_info | Labeled Cloud Foundry Events information with a constant `1` value | `environment`, `deployment`, `type`, `actor`, `actor_type`, `actor_name`, `actor_username`, `actee`, `actee_type`, `actee_name`, `space_id`, `organization_id` | | *metrics.namespace*_events_scrapes_total | Total number of scrapes for Cloud Foundry Events | `environment`, `deployment` | | *metrics.namespace*_events_scrape_errors_total | Total number of scrape errors of Cloud Foundry Events | `environment`, `deployment` | @@ -188,7 +192,7 @@ The exporter returns the following `Events` metrics: The exporter returns the following `IsolationSegments` 
metrics (requires `cf.api-v3-enabled` enabled): | Metric | Description | Labels | -| ------ | ----------- | ------ | +|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------| | *metrics.namespace*_isolation_segment_info | Labeled Cloud Foundry Isolation Segment information with a constant `1` value | `environment`, `deployment`, `isolation_segment_id`, `isolation_segment_name` | | *metrics.namespace*_isolation_segments_scrapes_total | Total number of scrapes for Cloud Foundry Isolation Segments | `environment`, `deployment` | | *metrics.namespace*_isolation_segments_scrape_errors_total | Total number of scrape errors of Cloud Foundry Isolation Segments | `environment`, `deployment` | @@ -199,7 +203,7 @@ The exporter returns the following `IsolationSegments` metrics (requires `cf.api The exporter returns the following `Organizations` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------| | *metrics.namespace*_organization_info | Labeled Cloud Foundry Organization information with a constant `1` value | `environment`, `deployment`, `organization_id`, `organization_name`, `quota_name` | | *metrics.namespace*_organization_non_basic_services_allowed | A Cloud Foundry Organization can provision instances of paid service plans? 
(`1` for `true`, `0` for `false`) | `environment`, `deployment`, `organization_id`, `organization_name` | | *metrics.namespace*_organization_instance_memory_mb_limit | Maximum amount of memory (Mb) an application instance can have in a Cloud Foundry Organization | `environment`, `deployment`, `organization_id`, `organization_name` | @@ -220,7 +224,7 @@ The exporter returns the following `Organizations` metrics: The exporter returns the following `Routes` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|---------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------| | *metrics.namespace*_route_info | Labeled Cloud Foundry Route information with a constant `1` value | `environment`, `deployment`, `route_id`, `route_host`, `route_path`, `domain_id`, `space_id`, `service_instance_id` | | *metrics.namespace*_routes_scrapes_total | Total number of scrapes for Cloud Foundry Routes | `environment`, `deployment` | | *metrics.namespace*_routes_scrape_errors_total | Total number of scrape errors of Cloud Foundry Routes | `environment`, `deployment` | @@ -231,7 +235,7 @@ The exporter returns the following `Routes` metrics: The exporter returns the following `Security Groups` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| | *metrics.namespace*_security_group_info | Labeled Cloud Foundry Security Group information with a constant `1` value | `environment`, `deployment`, `security_group_id`, `security_group_name` | | 
*metrics.namespace*_security_groups_scrapes_total | Total number of scrapes for Cloud Foundry Security Groups | `environment`, `deployment` | | *metrics.namespace*_security_groups_scrape_errors_total | Total number of scrape errors of Cloud Foundry Security Groups | `environment`, `deployment` | @@ -242,7 +246,7 @@ The exporter returns the following `Security Groups` metrics: The exporter returns the following `Services` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------| | *metrics.namespace*_service_info | Labeled Cloud Foundry Service information with a constant `1` value | `environment`, `deployment`, `service_id`, `service_label` | | *metrics.namespace*_services_scrapes_total | Total number of scrapes for Cloud Foundry Services | `environment`, `deployment` | | *metrics.namespace*_services_scrape_errors_total | Total number of scrape errors of Cloud Foundry Services | `environment`, `deployment` | @@ -252,31 +256,30 @@ The exporter returns the following `Services` metrics: The exporter returns the following `Service Bindings` metrics: -| Metric | Description | Labels | -| ------ | ----------- | ------ | +| Metric | Description | Labels | +|-------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------| | *metrics.namespace*_service_binding_info | Labeled Cloud Foundry Service Binding information with a constant `1` value | `environment`, `deployment`, `service_binding_id`, `application_id`, `service_instance_id`, `route_binding_id` | -| 
*metrics.namespace*_service_bindings_scrapes_total | Total number of scrapes for Cloud Foundry Service Bindings | `environment`, `deployment` | -| *metrics.namespace*_service_bindings_scrape_errors_total | Total number of scrape errors of Cloud Foundry Service Bindings | `environment`, `deployment` | -| *metrics.namespace*_last_service_bindings_scrape_error | Whether the last scrape of Service Bindings metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | -| *metrics.namespace*_last_service_bindings_scrape_timestamp | Number of seconds since 1970 since last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | -| *metrics.namespace*_last_service_bindings_scrape_duration_seconds | Duration of the last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | - +| *metrics.namespace*_service_bindings_scrapes_total | Total number of scrapes for Cloud Foundry Service Bindings | `environment`, `deployment` | +| *metrics.namespace*_service_bindings_scrape_errors_total | Total number of scrape errors of Cloud Foundry Service Bindings | `environment`, `deployment` | +| *metrics.namespace*_last_service_bindings_scrape_error | Whether the last scrape of Service Bindings metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | +| *metrics.namespace*_last_service_bindings_scrape_timestamp | Number of seconds since 1970 since last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | +| *metrics.namespace*_last_service_bindings_scrape_duration_seconds | Duration of the last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | The exporter returns the following `Service Route Bindings` metrics: -| Metric | Description | Labels | -| ------ | ----------- | ------ | -| *metrics.namespace*_service_route_binding_info | Labeled Cloud Foundry Service 
Route Binding information with a constant `1` value | `environment`, `deployment`, `route_id`, `route_service_url`, `service_instance_id`, `service_route_binding_id` | -| *metrics.namespace*_service_route_bindings_scrapes_total | Total number of scrapes for Cloud Foundry Service Bindings | `environment`, `deployment` | -| *metrics.namespace*_service_route_bindings_scrape_errors_total | Total number of scrape errors of Cloud Foundry Service Bindings | `environment`, `deployment` | -| *metrics.namespace*_last_service_route_bindings_scrape_error | Whether the last scrape of Service Bindings metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | -| *metrics.namespace*_last_service_route_bindings_scrape_timestamp | Number of seconds since 1970 since last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | -| *metrics.namespace*_last_service_route_bindings_scrape_duration_seconds | Duration of the last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | +| Metric | Description | Labels | +|-------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------| +| *metrics.namespace*_service_route_binding_info | Labeled Cloud Foundry Service Route Binding information with a constant `1` value | `environment`, `deployment`, `route_id`, `route_service_url`, `service_instance_id`, `service_route_binding_id` | +| *metrics.namespace*_service_route_bindings_scrapes_total | Total number of scrapes for Cloud Foundry Service Bindings | `environment`, `deployment` | +| *metrics.namespace*_service_route_bindings_scrape_errors_total | Total number of scrape errors of Cloud Foundry Service Bindings | `environment`, 
`deployment` | +| *metrics.namespace*_last_service_route_bindings_scrape_error | Whether the last scrape of Service Bindings metrics from Cloud Foundry resulted in an error (`1` for error, `0` for success) | `environment`, `deployment` | +| *metrics.namespace*_last_service_route_bindings_scrape_timestamp | Number of seconds since 1970 since last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | +| *metrics.namespace*_last_service_route_bindings_scrape_duration_seconds | Duration of the last scrape of Service Bindings metrics from Cloud Foundry | `environment`, `deployment` | The exporter returns the following `Service Instances` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|--------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | *metrics.namespace*_service_instance_info | Labeled Cloud Foundry Service Instance information with a constant `1` value | `environment`, `deployment`, `service_instance_id`, `service_instance_name`, `service_plan_id`, `space_id`, `type`, `last_operation_type`, `last_operation_state` | | *metrics.namespace*_service_instances_scrapes_total | Total number of scrapes for Cloud Foundry Service Instances | `environment`, `deployment` | | *metrics.namespace*_service_instances_scrape_errors_total | Total number of scrape errors of Cloud Foundry Service Instances | `environment`, `deployment` | @@ -287,7 +290,7 @@ The exporter returns the following `Service Instances` metrics: The exporter returns the following `Service Plans` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | 
+|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------| | *metrics.namespace*_service_plan_info | Labeled Cloud Foundry Service Plan information with a constant `1` value | `environment`, `deployment`, `service_plan_id`, `service_plane_name`, `service_id` | | *metrics.namespace*_service_plans_scrapes_total | Total number of scrapes for Cloud Foundry Service Plans | `environment`, `deployment` | | *metrics.namespace*_service_plans_scrape_errors_total | Total number of scrape errors of Cloud Foundry Service Plans | `environment`, `deployment` | @@ -298,7 +301,7 @@ The exporter returns the following `Service Plans` metrics: The exporter returns the following `Spaces` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------| | *metrics.namespace*_space_info | Labeled Cloud Foundry Space information with a constant `1` value | `environment`, `deployment`, `space_id`, `space_name`, `organization_id`, `quota_name` | | *metrics.namespace*_space_non_basic_services_allowed | A Cloud Foundry Space can provision instances of paid service plans? 
(`1` for `true`, `0` for `false`) | `environment`, `deployment`, `space_id`, `space_name`, `organization_id` | | *metrics.namespace*_space_instance_memory_mb_limit | Maximum amount of memory (Mb) an application instance can have in a Cloud Foundry Space | `environment`, `deployment`, `space_id`, `space_name`, `organization_id` | @@ -318,7 +321,7 @@ The exporter returns the following `Spaces` metrics: The exporter returns the following `Stacks` metrics: | Metric | Description | Labels | -| ------ | ----------- | ------ | +|---------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------| | *metrics.namespace*_stack_info | Labeled Cloud Foundry Stack information with a constant `1` value | `environment`, `deployment`, `stack_id`, `stack_name` | | *metrics.namespace*_stacks_scrapes_total | Total number of scrapes for Cloud Foundry Stacks | `environment`, `deployment` | | *metrics.namespace*_stacks_scrape_errors_total | Total number of scrape errors of Cloud Foundry Stacks | `environment`, `deployment` |